repo_name stringlengths 6-130 | hexsha sequence | file_path sequence | code sequence | apis sequence | possible_versions list |
---|---|---|---|---|---|
brunompacheco/part-counting | [
"dbf71e7465c8e384e3c60694f65819a65742193b",
"dbf71e7465c8e384e3c60694f65819a65742193b",
"dbf71e7465c8e384e3c60694f65819a65742193b"
] | [
"src/features/base.py",
"src/data/base.py",
"src/visualization/test_results.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport open3d as o3d\nimport torch\n\nfrom src.data.rgbd import load_rgbd\nfrom src.data.pcd import load_pcd\nfrom .cropping import mask_selection_volume, box_mask_from_rgbd\n\n\ndef preprocess_box_for_cv(img_fpath: Path) -> o3d.geometry.PointCloud:\n \"\"\"Load and strip walls of box, keeping the interior. For CV-based models.\n\n The mask of the interior of the box is extracted using Canny+Hough, which\n is then used to crop the point cloud generated from the RGBD image.\n\n Args:\n img_fpath: Filepath of the .exr image file. Must contain grayscale as\n the first channel and depth as second channel.\n \n Returns:\n box: Point cloud image of the interior of the box.\n \"\"\"\n rgbd = load_rgbd(img_fpath)\n\n box_mask = box_mask_from_rgbd(rgbd)\n\n vol = mask_selection_volume(rgbd, box_mask)\n\n pcd = load_pcd(rgbd)\n\n box = vol.crop_point_cloud(pcd)\n\n return box\n\ndef load_part_model(part_fpath: Path, number_of_points=10000) -> o3d.geometry.PointCloud:\n \"\"\"Load part model as a point cloud image in meters.\n\n Args:\n part_fpath: Filepath of the .stl model file.\n number_of_points: For the resulting point cloud, which is sampled\n uniformly.\n \n Returns:\n part: Point cloud of the part, sampled uniformly.\n \"\"\"\n part_mesh = o3d.io.read_triangle_mesh(str(part_fpath), enable_post_processing=True)\n\n part_mesh.paint_uniform_color([1., 0., 0.,])\n\n part = part_mesh.sample_points_uniformly(number_of_points=number_of_points)\n\n part_points = np.array(part.points) / 1000 # mm to meter conversion\n part_points = part_points + np.array([0,0,0.3])\n part_points = o3d.utility.Vector3dVector(part_points)\n part.points = part_points\n\n return part\n\ndef preprocess_box_for_dl(img_fpath: Path, device: torch.device = None) -> torch.Tensor:\n \"\"\"Load box picture and reshape it. For DL-based models.\n\n Args:\n img_fpath: Filepath of the .png image file.\n device: Torch device where to load the image.\n\n Returns:\n X: Image loaded in a batch-like format (batch with a single sample),\n proper for feeding to a model.\n \"\"\"\n from torchvision import transforms as T\n\n if device is None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n transform = T.Compose([\n T.ToTensor(),\n # normalize required for pre-trained image models,\n # check https://pytorch.org/vision/stable/models.html\n T.Normalize(mean=[0.485, 0.456], std=[0.229, 0.224]),\n ])\n\n data = np.array(o3d.io.read_image(str(img_fpath)))\n data = data[:,:,1:3]\n\n X = transform(data)\n X = X.unsqueeze(0)\n X = X.type(torch.FloatTensor)\n X = X.to(device)\n\n return X\n",
"\"\"\"Fundamental structures and utils.\n\"\"\"\nfrom pathlib import Path\nfrom typing import Tuple, Union\n\nimport h5py\nimport numpy as np\n\nfrom torch import Tensor\nfrom torch.utils.data import Dataset, Subset\nfrom torchvision import transforms as T\n\n\nclass FimacDataset(Dataset):\n def __init__(self, hdf5_fpath: Union[Path, str],\n test=False) -> None:\n super().__init__()\n\n self._hdf5_file = h5py.File(str(hdf5_fpath), \"r\")\n self._hdf5_dataset_name = 'test' if test else 'train'\n\n self.transform = T.Compose([\n T.ToTensor(),\n # normalize required for pre-trained image models,\n # check https://pytorch.org/vision/stable/models.html\n T.Normalize(mean=[0.485, 0.456], std=[0.229, 0.224]),\n ])\n\n @property\n def _data(self) -> h5py.Dataset:\n return self._hdf5_file[self._hdf5_dataset_name]\n\n def __len__(self) -> int:\n return self._data.shape[0] * self._data.shape[1]\n\n def __getitem__(self, index) -> Tuple[Tensor, int]:\n i = index // self._data.shape[1] # render index\n j = index % self._data.shape[1] # render step (# of parts) index\n\n image = self._data[i,j,:,:,:]\n # torchvision's normalize expects numpy array in shape (H x W x C)\n image = self.transform(np.moveaxis(image, 0, -1))\n\n label = j\n\n return image, label\n\n def __del__(self) -> None:\n self._hdf5_file.close()\n\n def _subset(self, frac: float) -> np.array:\n assert frac <= 1 and frac > 0, '`frac` must be <=1 and >0'\n\n renders = np.arange(self._data.shape[0])\n parts_per_render = self._data.shape[1]\n\n frac_renders = np.random.choice(renders, int(frac*len(renders)),\n replace=False)\n\n indices = [np.arange(parts_per_render * render,\n parts_per_render * (render + 1))\n for render in frac_renders]\n indices = np.concatenate(indices)\n\n return indices\n\n def subset(self, frac: float) -> Subset:\n \"\"\"Return a fraction of this dataset without contamining the remaining.\n\n The fraction is extracted in such a way that for a given render, either\n all of its images are in the subset or none is.\n\n Args:\n frac: percentage of the dataset to be returned.\n \"\"\"\n indices = self._subset(frac)\n\n return Subset(self, indices)\n \n def split(self, split_size: float) -> Tuple[Subset, Subset]:\n \"\"\"Generate a split (train-test) of the dataset.\n\n Partitions the dataset into two subsets in such a way that for all\n renders, their images are all in either one of the subsets, that is,\n avoiding two subsets containing (distinct) images of the same render.\n\n Args:\n split_size: split_size of the renders that the first subset will contain.\n \"\"\"\n assert split_size <= 1 and split_size > 0, '`split_size` must be <=1 and >0'\n\n indices_a = self._subset(split_size)\n\n indices_b = filter(lambda i: i not in indices_a, np.arange(len(self)))\n\n return Subset(self, indices_a), Subset(self, indices_b)\n \n def subset_split(self, frac: float, split_size: float) -> Tuple[Subset, Subset]:\n \"\"\"Generate a split (train-test) of a fraction of the dataset.\n\n A combination of `self.subset()` and `self.split`.\n\n Args:\n frac: percentage of the dataset to be used for the split.\n split_size: controls percentage of renders in the dataset fraction\n that will be in the first subset.\n \"\"\"\n assert frac <= 1 and frac > 0, '`frac` must be <=1 and >0'\n\n renders = np.arange(self._data.shape[0])\n parts_per_render = self._data.shape[1]\n\n frac_renders = np.random.choice(\n renders,\n int(frac*len(renders)),\n replace=False\n )\n\n renders_a = np.random.choice(\n frac_renders,\n 
int(split_size*len(frac_renders)),\n replace=False\n )\n renders_b = filter(lambda r: r not in renders_a, frac_renders)\n\n indices_a = np.concatenate([\n np.arange(parts_per_render * render,\n parts_per_render * (render + 1))\n for render in renders_a\n ])\n indices_b = np.concatenate([\n np.arange(parts_per_render * render,\n parts_per_render * (render + 1))\n for render in renders_b\n ])\n\n return Subset(self, indices_a), Subset(self, indices_b)\n",
"import json\n\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom dotenv import load_dotenv, find_dotenv\n\n# find .env automagically by walking up directories until it's found\ndotenv_path = find_dotenv()\nproject_dir = Path(dotenv_path).parent\n\n# load up the entries as environment variables\nload_dotenv(dotenv_path)\n\n\nif __name__ == '__main__':\n preds_fpaths = project_dir.glob('models/*_test_preds.json')\n\n # prepare data\n dfs = list()\n for preds_fpath in preds_fpaths:\n model = preds_fpath.name.split('_')[0]\n\n times_fpath = preds_fpath.parent/preds_fpath.name.replace('preds', 'times')\n\n with open(preds_fpath, 'r') as f:\n preds = json.load(f)\n\n with open(times_fpath, 'r') as f:\n times = json.load(f)\n\n sim = np.array([s.split('.')[0] for s in preds.keys()])\n y_hat = np.array(list(preds.values()))\n y_hat = pd.Series(y_hat, sim)\n\n sim = np.array([s.split('.')[0] for s in times.keys()])\n t = np.array(list(times.values()))\n t = pd.Series(t, sim)\n\n df = pd.DataFrame([y_hat, t]).T\n df = df.reset_index()\n df.columns = ['Sim', 'y_hat', 't']\n\n df['model'] = model\n\n dfs.append(df)\n\n df = pd.concat(dfs)\n\n df['y'] = df['Sim'].apply(lambda s: int(s.split('_')[-1]))\n df['error'] = (df['y'] - df['y_hat']).abs()\n\n # inference times\n fig, ax = plt.subplots()\n fig.set_size_inches(8,8)\n\n bpdict = df.boxplot('t', 'model', ax=ax, return_type='dict')\n bpdict = bpdict['t']\n\n ax.set_title('Prediction time')\n ax.set_xlabel('Models')\n ax.set_ylabel('Time')\n\n ax.set_yscale('log')\n ax.grid(True)\n\n curr_labels = [t.get_text() for t in ax.get_xticklabels()]\n for i, model in enumerate(curr_labels):\n median = bpdict['medians'][i].get_ydata()[0]\n plt.annotate(f\"{median:.2f}\", (i+1.25, median))\n\n fig.suptitle('')\n fig.savefig(project_dir/'reports/figures/time_boxplot.png', bbox_inches='tight')\n plt.close(fig)\n\n # performance by number of parts\n fig, ax = plt.subplots()\n fig.set_size_inches(8,8)\n\n labels = dict()\n for model in df['model'].unique():\n df_ = df[df['model'] == model]\n\n hits = (df_['y_hat'] + 0.5).astype(int) == df_['y']\n acc = hits.sum() / hits.size\n\n labels[model] = f\"{model}\\n(Acc. 
= {acc*100:.1f}%)\"\n\n err_mean = df_.groupby('y')['error'].mean()\n err_std = df_.groupby('y')['error'].std()\n\n ax.fill_between(\n err_mean.index,\n err_mean + err_std,\n err_mean - err_std,\n alpha=0.5\n )\n ax.plot(err_mean, label=model)\n\n ax.set_xlim(0, 100)\n ax.set_ylim(0.1, ax.get_ylim()[1])\n\n ax.set_title('Performance by number of parts in box')\n ax.set_xlabel('Number of parts')\n ax.set_ylabel('Absolute error')\n ax.set_yscale('log')\n\n ax.legend()\n ax.grid()\n\n fig.savefig(project_dir/'reports/figures/nparts_error_line.png', bbox_inches='tight')\n plt.close(fig)\n\n # average performances\n fig, ax = plt.subplots()\n fig.set_size_inches(8,8)\n\n bpdict = df.boxplot('error', 'model', ax=ax, return_type='dict')\n bpdict = bpdict['error']\n\n ax.set_title('Performance on the test set')\n ax.set_xlabel('Models')\n ax.set_ylabel('Absolute error')\n\n ax.set_ylim(0., ax.get_ylim()[1])\n ax.grid(True)\n\n curr_labels = [t.get_text() for t in ax.get_xticklabels()]\n ax.set_xticklabels([labels[model] for model in curr_labels])\n\n for i, model in enumerate(curr_labels):\n median = bpdict['medians'][i].get_ydata()[0]\n plt.annotate(f\"{median:.2f}\", (i+1.25, median))\n\n fig.suptitle('')\n fig.savefig(project_dir/'reports/figures/error_boxplot.png', bbox_inches='tight')\n plt.close(fig)\n"
] | [
[
"torch.device",
"numpy.array",
"torch.cuda.is_available"
],
[
"numpy.concatenate",
"numpy.arange",
"torch.utils.data.Subset",
"numpy.moveaxis"
],
[
"pandas.concat",
"pandas.Series",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
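In the `src/data/base.py` entry above, `FimacDataset.split` partitions indices at the render level so that all images from one render land in a single subset, avoiding cross-subset leakage. A minimal standalone sketch of that idea, assuming hypothetical `n_renders`/`parts_per_render` values and plain NumPy in place of the HDF5-backed dataset:

```python
import numpy as np

def split_by_render(n_renders: int, parts_per_render: int,
                    split_size: float, seed: int = 0):
    """Split flat sample indices so each render's images stay in one subset."""
    rng = np.random.default_rng(seed)
    renders = np.arange(n_renders)
    chosen = rng.choice(renders, int(split_size * n_renders), replace=False)

    def to_indices(rs):
        # Expand render ids into their contiguous per-image index blocks.
        return np.concatenate(
            [np.arange(r * parts_per_render, (r + 1) * parts_per_render) for r in rs]
        )

    mask = np.isin(renders, chosen)
    return to_indices(renders[mask]), to_indices(renders[~mask])

idx_a, idx_b = split_by_render(n_renders=10, parts_per_render=5, split_size=0.8)
assert not set(idx_a.tolist()) & set(idx_b.tolist())  # no render in both subsets
```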
carefree0910/carefree-flow | [
"7035015a072cf8142074d01683889f90950d2939",
"7035015a072cf8142074d01683889f90950d2939"
] | [
"cflow/misc/internal_/data.py",
"cflow/misc/toolkit.py"
] | [
"import os\n\nimport numpy as np\n\nfrom abc import abstractmethod\nfrom abc import ABCMeta\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Type\nfrom typing import Tuple\nfrom typing import Callable\nfrom typing import Optional\nfrom cftool.misc import Saving\nfrom oneflow.utils.data import Dataset\nfrom oneflow.utils.data import SequentialSampler\nfrom oneflow.utils.data import DataLoader as FlowDataLoader\n\nfrom ...types import arrays_type\nfrom ...types import sample_weights_type\nfrom ...protocol import DatasetProtocol\nfrom ...protocol import DataLoaderProtocol\nfrom ...misc.toolkit import to_flow\nfrom ...misc.toolkit import WithRegister\n\n\ndata_modules: Dict[str, Type[\"DataModule\"]] = {}\n\n\nclass DataModule(WithRegister, metaclass=ABCMeta):\n d: Dict[str, Type[\"DataModule\"]] = data_modules\n\n id_file = \"id.txt\"\n info_name = \"info\"\n package_folder = \"data_module\"\n\n @property\n @abstractmethod\n def info(self) -> Dict[str, Any]:\n pass\n\n def prepare(self, sample_weights: sample_weights_type) -> None:\n pass\n\n def initialize(self) -> Any:\n pass\n\n def save(self, folder: str) -> None:\n folder = os.path.join(folder, self.package_folder)\n os.makedirs(folder, exist_ok=True)\n with open(os.path.join(folder, self.id_file), \"w\") as f:\n f.write(self.__identifier__)\n self.save_info(folder)\n\n def save_info(self, folder: str) -> None:\n Saving.save_dict(self.info, self.info_name, folder)\n\n @classmethod\n def load(cls, folder: str) -> Dict[str, Any]:\n folder = os.path.join(folder, cls.package_folder)\n with open(os.path.join(folder, cls.id_file), \"r\") as f:\n base = cls.get(f.read())\n return base.load_info(folder)\n\n @classmethod\n def load_info(cls, folder: str) -> Dict[str, Any]:\n return Saving.load_dict(cls.info_name, folder)\n\n\[email protected](\"dl\")\nclass DLDataModule(DataModule, metaclass=ABCMeta):\n train_loader: DataLoaderProtocol\n valid_loader: Optional[DataLoaderProtocol]\n\n def initialize(self) -> Tuple[DataLoaderProtocol, Optional[DataLoaderProtocol]]:\n pass\n\n\ndef get_weighted_indices(n: int, weights: Optional[np.ndarray]) -> np.ndarray:\n indices = np.arange(n)\n if weights is not None:\n numbers = np.random.multinomial(n, weights)\n indices = indices.repeat(numbers)\n return indices\n\n\[email protected](\"ml\")\nclass MLDataset(DatasetProtocol):\n def __init__(self, x: np.ndarray, y: Optional[np.ndarray]):\n super().__init__()\n self.x = x\n self.y = y\n\n def __len__(self) -> int:\n return len(self.x)\n\n\[email protected](\"ml\")\nclass MLLoader(DataLoaderProtocol):\n data: MLDataset\n cursor: int\n indices: np.ndarray\n\n def __init__(\n self,\n data: MLDataset,\n shuffle: bool,\n *,\n name: Optional[str] = None,\n batch_size: int = 128,\n sample_weights: Optional[np.ndarray] = None,\n ):\n if sample_weights is not None and len(data) != len(sample_weights):\n raise ValueError(\n f\"the number of data samples ({len(data)}) is not identical with \"\n f\"the number of sample weights ({len(sample_weights)})\"\n )\n super().__init__(sample_weights=sample_weights)\n self.data = data\n self.shuffle = shuffle\n self.shuffle_backup = shuffle\n self.name = name\n self.batch_size = batch_size\n\n def __iter__(self) -> \"MLLoader\":\n self.cursor = 0\n self.indices = get_weighted_indices(len(self.data), self.sample_weights)\n if self.shuffle:\n np.random.shuffle(self.indices)\n return self\n\n def __next__(self) -> arrays_type:\n start = self.cursor\n if start >= len(self.data):\n raise StopIteration\n self.cursor += 
self.batch_size\n indices = self.indices[start : self.cursor]\n return (\n to_flow(self.data.x[indices]),\n None if self.data.y is None else to_flow(self.data.y[indices]),\n to_flow(indices),\n )\n\n def disable_shuffle(self) -> None:\n self.shuffle = False\n\n def recover_shuffle(self) -> None:\n self.shuffle = self.shuffle_backup\n\n def copy(self) -> \"MLLoader\":\n return MLLoader(\n self.data,\n self.shuffle,\n name=self.name,\n batch_size=self.batch_size,\n sample_weights=self.sample_weights,\n )\n\n\[email protected](\"cv\")\nclass CVDataset(DatasetProtocol):\n def __init__(self, dataset: Dataset):\n super().__init__()\n self.dataset = dataset\n\n def __len__(self) -> int:\n return len(self.dataset) # type: ignore\n\n def __getitem__(self, item: Any) -> Any:\n return self.dataset[item]\n\n\nclass DataLoader(FlowDataLoader):\n def __setattr__(self, attr: str, val: Any) -> None:\n if self.__initialized and attr in (\n \"batch_size\",\n \"batch_sampler\",\n \"drop_last\",\n \"dataset\",\n \"persistent_workers\",\n ):\n raise ValueError(\n f\"{attr} attribute should not be set after \"\n f\"{self.__class__.__name__} is initialized\"\n )\n\n super(FlowDataLoader, self).__setattr__(attr, val)\n\n\[email protected](\"cv\")\nclass CVLoader(DataLoaderProtocol):\n data: CVDataset\n\n def __init__(\n self,\n loader: DataLoader,\n batch_callback: Optional[Callable[[Any], arrays_type]] = None,\n *,\n sample_weights: Optional[np.ndarray] = None,\n ):\n if sample_weights is not None:\n raise ValueError(\n \"in `CVLoader`, we should introduce `sample_weights` to the original \"\n \"OneFlow `DataLoader` (by specifying corresponding samplers)\"\n )\n super().__init__(sample_weights=sample_weights)\n self.loader = loader\n self.data = loader.dataset # type: ignore\n self.batch_callback = batch_callback\n self.sampler_backup = loader.sampler\n self._iterator: Optional[Any] = None\n\n def __iter__(self) -> \"CVLoader\":\n self._iterator = self.loader.__iter__()\n return self\n\n def __next__(self) -> arrays_type:\n batch = self._iterator.__next__() # type: ignore\n if self.batch_callback is None:\n return batch\n return self.batch_callback(batch)\n\n def __len__(self) -> int:\n return len(self.loader)\n\n @property\n def batch_size(self) -> int: # type: ignore\n # TODO : consider world size\n # batch_size = self.loader.batch_size\n # if dist.is_initialized():\n # batch_size *= dist.get_world_size()\n # return batch_size\n return self.loader.batch_size\n\n def copy(self) -> \"CVLoader\":\n dataset = self.data.dataset\n self.data.__dict__.pop(\"dataset\")\n copied = super().copy()\n assert isinstance(copied, CVLoader)\n self.data.dataset = copied.data.dataset = dataset\n return copied\n\n def disable_shuffle(self) -> None:\n sampler = SequentialSampler(self.data)\n self.loader.sampler = sampler\n if hasattr(self.loader, \"batch_sampler\"):\n self.loader.batch_sampler.sampler = sampler\n\n def recover_shuffle(self) -> None:\n self.loader.sampler = self.sampler_backup\n if hasattr(self.loader, \"batch_sampler\"):\n self.loader.batch_sampler.sampler = self.sampler_backup\n\n\n__all__ = [\n \"DataModule\",\n \"DLDataModule\",\n \"MLDataset\",\n \"MLLoader\",\n \"CVDataset\",\n \"CVLoader\",\n \"DataLoader\",\n \"get_weighted_indices\",\n]\n",
"import os\nimport json\nimport math\nimport time\nimport shutil\nimport inspect\nimport logging\nimport urllib.request\n\nimport numpy as np\nimport oneflow as flow\nimport oneflow.nn as nn\nimport matplotlib.pyplot as plt\nimport oneflow.nn.functional as F\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom tqdm import tqdm\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Type\nfrom typing import Tuple\nfrom typing import Union\nfrom typing import Generic\nfrom typing import TypeVar\nfrom typing import Callable\nfrom typing import Optional\nfrom typing import ContextManager\nfrom zipfile import ZipFile\nfrom argparse import Namespace\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom collections import OrderedDict\nfrom cftool.misc import register_core\nfrom cftool.misc import show_or_save\nfrom cftool.misc import shallow_copy_dict\nfrom cftool.misc import context_error_handler\nfrom cftool.misc import LoggingMixin\n\nfrom ..types import arr_type\nfrom ..types import arrays_type\nfrom ..types import general_config_type\nfrom ..types import sample_weights_type\nfrom ..constants import TIME_FORMAT\nfrom ..constants import WARNING_PREFIX\n\n\n# general\n\n\ndef _parse_config(config: general_config_type) -> Dict[str, Any]:\n if config is None:\n return {}\n if isinstance(config, str):\n with open(config, \"r\") as f:\n return json.load(f)\n return shallow_copy_dict(config)\n\n\ndef prepare_workplace_from(workplace: str, timeout: timedelta = timedelta(7)) -> str:\n current_time = datetime.now()\n if os.path.isdir(workplace):\n for stuff in os.listdir(workplace):\n if not os.path.isdir(os.path.join(workplace, stuff)):\n continue\n try:\n stuff_time = datetime.strptime(stuff, TIME_FORMAT)\n stuff_delta = current_time - stuff_time\n if stuff_delta > timeout:\n print(\n f\"{WARNING_PREFIX}{stuff} will be removed \"\n f\"(already {stuff_delta} ago)\"\n )\n shutil.rmtree(os.path.join(workplace, stuff))\n except:\n pass\n workplace = os.path.join(workplace, current_time.strftime(TIME_FORMAT))\n os.makedirs(workplace)\n return workplace\n\n\ndef get_latest_workplace(root: str) -> Optional[str]:\n all_workplaces = []\n for stuff in os.listdir(root):\n if not os.path.isdir(os.path.join(root, stuff)):\n continue\n try:\n datetime.strptime(stuff, TIME_FORMAT)\n all_workplaces.append(stuff)\n except:\n pass\n if not all_workplaces:\n return None\n return os.path.join(root, sorted(all_workplaces)[-1])\n\n\ndef sort_dict_by_value(d: Dict[Any, Any], *, reverse: bool = False) -> OrderedDict:\n sorted_items = sorted([(v, k) for k, v in d.items()], reverse=reverse)\n return OrderedDict({item[1]: item[0] for item in sorted_items})\n\n\ndef to_standard(arr: np.ndarray) -> np.ndarray:\n if is_int(arr):\n arr = arr.astype(np.int64)\n elif is_float(arr):\n arr = arr.astype(np.float32)\n return arr\n\n\ndef parse_args(args: Any) -> Namespace:\n return Namespace(**{k: None if not v else v for k, v in args.__dict__.items()})\n\n\ndef parse_path(path: Optional[str], root_dir: Optional[str]) -> Optional[str]:\n if path is None:\n return None\n if root_dir is None:\n return path\n return os.path.abspath(os.path.join(root_dir, path))\n\n\ndef get_arguments(*, pop_class_attributes: bool = True) -> Dict[str, Any]:\n frame = inspect.currentframe().f_back # type: ignore\n if frame is None:\n raise ValueError(\"`get_arguments` should be called inside a frame\")\n arguments = inspect.getargvalues(frame)[-1]\n if pop_class_attributes:\n arguments.pop(\"self\", 
None)\n arguments.pop(\"__class__\", None)\n return arguments\n\n\ndef download_dataset(\n name: str,\n *,\n root: str = os.getcwd(),\n remove_zip: Optional[bool] = None,\n extract_zip: bool = True,\n prefix: str = \"https://github.com/carefree0910/datasets/releases/download/latest/\",\n) -> None:\n os.makedirs(root, exist_ok=True)\n file = f\"{name}.zip\"\n tgt_zip_path = os.path.join(root, file)\n with DownloadProgressBar(unit=\"B\", unit_scale=True, miniters=1, desc=name) as t:\n urllib.request.urlretrieve(\n f\"{prefix}{file}\",\n filename=tgt_zip_path,\n reporthook=t.update_to,\n )\n if extract_zip:\n with ZipFile(tgt_zip_path, \"r\") as zip_ref:\n zip_ref.extractall(os.path.join(root, name))\n if remove_zip is None:\n remove_zip = extract_zip\n if remove_zip:\n if extract_zip:\n os.remove(tgt_zip_path)\n else:\n print(f\"{WARNING_PREFIX}zip file is not extracted, so we'll not remove it!\")\n\n\ndef _rmtree(folder: str, patience: float = 10.0) -> None:\n if not os.path.isdir(folder):\n return None\n t = time.time()\n while True:\n try:\n if time.time() - t >= patience:\n prefix = LoggingMixin.warning_prefix\n print(f\"\\n{prefix}failed to rmtree: {folder}\")\n break\n shutil.rmtree(folder)\n break\n except:\n print(\"\", end=\".\", flush=True)\n time.sleep(1)\n\n\nT = TypeVar(\"T\")\n\n\nclass WithRegister(Generic[T]):\n d: Dict[str, Type[T]]\n __identifier__: str\n\n @classmethod\n def get(cls, name: str) -> Type[T]:\n return cls.d[name]\n\n @classmethod\n def make(cls, name: str, config: Dict[str, Any]) -> T:\n return cls.get(name)(**config) # type: ignore\n\n @classmethod\n def make_multiple(\n cls,\n names: Union[str, List[str]],\n configs: Optional[Dict[str, Any]] = None,\n ) -> Union[T, List[T]]:\n if configs is None:\n configs = {}\n if isinstance(names, str):\n return cls.make(names, configs) # type: ignore\n return [\n cls.make(name, shallow_copy_dict(configs.get(name, {}))) # type: ignore\n for name in names\n ]\n\n @classmethod\n def register(cls, name: str) -> Callable[[Type], Type]:\n def before(cls_: Type) -> None:\n cls_.__identifier__ = name\n\n return register_core(name, cls.d, before_register=before)\n\n\nclass WeightsStrategy:\n def __init__(self, strategy: Optional[str]):\n self.strategy = strategy\n\n def __call__(self, num_train: int, num_valid: int) -> sample_weights_type:\n if self.strategy is None:\n return None\n return getattr(self, self.strategy)(num_train, num_valid)\n\n def linear_decay(self, num_train: int, num_valid: int) -> sample_weights_type:\n return np.linspace(0, 1, num_train + 1)[1:]\n\n def radius_decay(self, num_train: int, num_valid: int) -> sample_weights_type:\n return np.sin(np.arccos(1.0 - np.linspace(0, 1, num_train + 1)[1:]))\n\n def log_decay(self, num_train: int, num_valid: int) -> sample_weights_type:\n return np.log(np.arange(num_train) + np.e)\n\n def sigmoid_decay(self, num_train: int, num_valid: int) -> sample_weights_type:\n x = np.linspace(-5.0, 5.0, num_train)\n return 1.0 / (1.0 + np.exp(-x))\n\n def visualize(self, export_path: str = \"weights_strategy.png\") -> None:\n n = 1000\n x = np.linspace(0, 1, n)\n y = self(n, 0)\n if isinstance(y, tuple):\n y = y[0]\n plt.figure()\n plt.plot(x, y)\n show_or_save(export_path)\n\n\nclass LoggingMixinWithRank(LoggingMixin):\n is_rank_0: bool = True\n\n def set_rank_0(self, value: bool) -> None:\n self.is_rank_0 = value\n for v in self.__dict__.values():\n if isinstance(v, LoggingMixinWithRank):\n v.set_rank_0(value)\n\n def _init_logging(\n self,\n verbose_level: Optional[int] = 2,\n 
trigger: bool = True,\n ) -> None:\n if not self.is_rank_0:\n return None\n super()._init_logging(verbose_level, trigger)\n\n def log_msg(\n self,\n body: str,\n prefix: str = \"\",\n verbose_level: Optional[int] = 1,\n msg_level: int = logging.INFO,\n frame: Any = None,\n ) -> None:\n if not self.is_rank_0:\n return None\n super().log_msg(body, prefix, verbose_level, msg_level, frame)\n\n def log_block_msg(\n self,\n body: str,\n prefix: str = \"\",\n title: str = \"\",\n verbose_level: Optional[int] = 1,\n msg_level: int = logging.INFO,\n frame: Any = None,\n ) -> None:\n if not self.is_rank_0:\n return None\n super().log_block_msg(body, prefix, title, verbose_level, msg_level, frame)\n\n def log_timing(self) -> None:\n if not self.is_rank_0:\n return None\n return super().log_timing()\n\n\nclass DownloadProgressBar(tqdm):\n def update_to(\n self,\n b: int = 1,\n bsize: int = 1,\n tsize: Optional[int] = None,\n ) -> None:\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\n# dl\n\n\ndef to_flow(arr: np.ndarray) -> flow.Tensor:\n dtype = flow.int32 if is_int(arr) else flow.float32\n return flow.tensor(arr, dtype=dtype)\n\n\ndef to_numpy(tensor: flow.Tensor) -> np.ndarray:\n return tensor.detach().cpu().numpy()\n\n\ndef to_device(batch: arrays_type, device: flow.device) -> arrays_type:\n return tuple(\n v.to(device)\n if isinstance(v, flow.Tensor)\n else [vv.to(device) if isinstance(vv, flow.Tensor) else vv for vv in v]\n if isinstance(v, list)\n else v\n for v in batch\n )\n\n\ndef squeeze(arr: arr_type) -> arr_type:\n n = arr.shape[0]\n arr = arr.squeeze()\n if n == 1:\n arr = arr[None, ...]\n return arr\n\n\ndef softmax(arr: arr_type) -> arr_type:\n if isinstance(arr, flow.Tensor):\n return F.softmax(arr, dim=1)\n logits = arr - np.max(arr, axis=1, keepdims=True)\n exp = np.exp(logits)\n return exp / exp.sum(1, keepdims=True)\n\n\nclass mode_context(context_error_handler):\n def __init__(\n self,\n module: nn.Module,\n *,\n to_train: Optional[bool],\n use_grad: Optional[bool],\n ):\n self._to_train = to_train\n self._module, self._training = module, module.training\n self._cache = {p: p.requires_grad for p in module.parameters()}\n if use_grad is not None:\n for p in module.parameters():\n p.requires_grad_(use_grad)\n if use_grad is None:\n self._grad_context: Optional[ContextManager] = None\n else:\n self._grad_context = flow.grad_enable() if use_grad else flow.no_grad()\n\n def __enter__(self) -> None:\n if self._to_train is not None:\n self._module.train(mode=self._to_train)\n if self._grad_context is not None:\n self._grad_context.__enter__()\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n if self._to_train is not None:\n self._module.train(mode=self._training)\n if self._grad_context is not None:\n self._grad_context.__exit__(exc_type, exc_val, exc_tb)\n for p, v in self._cache.items():\n p.requires_grad_(v)\n\n\nclass train_context(mode_context):\n def __init__(self, module: nn.Module, *, use_grad: bool = True):\n super().__init__(module, to_train=True, use_grad=use_grad)\n\n\nclass eval_context(mode_context):\n def __init__(self, module: nn.Module, *, use_grad: Optional[bool] = False):\n super().__init__(\n module,\n to_train=False,\n use_grad=use_grad,\n )\n\n\n# ml\n\n\ndef is_int(arr: np.ndarray) -> bool:\n return np.issubdtype(arr.dtype, np.integer)\n\n\ndef is_float(arr: np.ndarray) -> bool:\n return np.issubdtype(arr.dtype, np.floating)\n\n\n# cv\n\n\[email protected]_grad()\ndef make_grid(\n tensor: 
Union[flow.Tensor, List[flow.Tensor]],\n num_rows: int = 8,\n padding: int = 2,\n normalize: bool = False,\n value_range: Optional[Tuple[int, int]] = None,\n scale_each: bool = False,\n pad_value: int = 0,\n) -> flow.Tensor:\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = flow.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = flow.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = flow.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n\n def norm_ip(img: flow.Tensor, low: Any, high: Any) -> flow.Tensor:\n img = img.clamp(min=low, max=high)\n img = (img - low) / max(high - low, 1.0e-5)\n return img\n\n def norm_range(t: flow.Tensor, value_range: Any) -> flow.Tensor:\n if value_range is not None:\n t = norm_ip(t, value_range[0], value_range[1])\n else:\n t = norm_ip(t, to_numpy(t.min()).item(), to_numpy(t.max()).item())\n return t\n\n if scale_each is True:\n for i, t in enumerate(tensor):\n tensor[i] = norm_range(t, value_range)\n else:\n tensor = norm_range(tensor, value_range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.shape[0]\n xmaps = min(num_rows, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding)\n num_channels = tensor.shape[1]\n grid = flow.zeros(num_channels, height * ymaps + padding, width * xmaps + padding)\n grid += pad_value\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n h_start, w_start = y * height + padding, x * width + padding\n h_end, w_end = h_start + height - padding, w_start + width - padding\n grid[:, h_start:h_end, w_start:w_end] = tensor[k]\n k = k + 1\n return grid\n\n\ndef save_images(arr: arr_type, path: str, num_rows: Optional[int] = None) -> None:\n if isinstance(arr, np.ndarray):\n arr = to_flow(arr)\n if num_rows is None:\n num_rows = math.ceil(math.sqrt(arr.shape[0]))\n grid = make_grid(arr, num_rows=num_rows, normalize=True)\n grid = grid.mul(255).add_(0.5).clamp(0, 255).permute(1, 2, 0)\n grid = grid.to(\"cpu\", flow.uint8).numpy()\n Image.fromarray(grid).save(path)\n\n\ndef iou(logits: arr_type, labels: arr_type) -> arr_type:\n is_flow = isinstance(logits, flow.Tensor)\n num_classes = logits.shape[1]\n if num_classes == 1:\n if is_flow:\n heat_map = flow.sigmoid(logits)\n else:\n heat_map = 1.0 / (1.0 + np.exp(-logits))\n elif num_classes == 2:\n heat_map = softmax(logits)[:, [1]]\n else:\n raise ValueError(\"`IOU` only supports binary situations\")\n intersect = heat_map * labels\n union = heat_map + labels - intersect\n kwargs = {\"dim\" if is_flow else \"axis\": tuple(range(1, len(intersect.shape)))}\n return intersect.sum(**kwargs) / union.sum(**kwargs)\n\n\ndef make_indices_visualization_map(indices: flow.Tensor) -> flow.Tensor:\n images = []\n for idx in indices.view(-1).tolist():\n img = Image.new(\"RGB\", (28, 28), (250, 250, 250))\n draw = ImageDraw.Draw(img)\n draw.text((12, 9), str(idx), (0, 0, 0))\n images.append(to_flow(np.array(img).transpose([2, 0, 1])))\n return flow.stack(images).float()\n"
] | [
[
"numpy.arange",
"numpy.random.multinomial",
"numpy.random.shuffle"
],
[
"numpy.linspace",
"numpy.arange",
"numpy.issubdtype",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
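`get_weighted_indices` in the `cflow` entry above turns per-sample weights into a resampled index array with a single multinomial draw: the draw decides how many times each index is repeated. A self-contained demo of the same trick, with illustrative weights:

```python
import numpy as np

def get_weighted_indices(n: int, weights=None) -> np.ndarray:
    # One multinomial draw yields a repeat count per index, summing to n.
    indices = np.arange(n)
    if weights is not None:
        counts = np.random.multinomial(n, weights)
        indices = indices.repeat(counts)
    return indices

w = np.array([0.7, 0.1, 0.1, 0.1])  # index 0 should dominate the resample
print(get_weighted_indices(4, w / w.sum()))
```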
TheMarex/charge | [
"85e35f7a6c8b8c161ecd851124d1363d5a450573"
] | [
"src/python/numeric.py"
] | [
"import numpy as np\n\nfrom functions import make_piecewise_linear, PiecewiseFunction, LinearFunction\n\ndef link_consumption(T, f, g, M):\n L = f(T)\n R = g(T)\n max_t_idx = np.iinfo(np.dtype('uint32')).max\n opt_H = np.full_like(L, float('inf'))\n opt_delta_idx = np.full_like(L, max_t_idx, dtype='uint32')\n for d_idx in range(len(L)):\n R_d = np.roll(R, d_idx)\n R_d[:d_idx] = float('inf')\n if L[d_idx] >= float('inf'):\n continue\n H_d = np.maximum(0, L[d_idx] + R_d)\n H_d[R_d >= float('inf')] = float('inf')\n index = opt_H > H_d\n opt_H[index] = H_d[index]\n opt_delta_idx[index] = d_idx\n opt_H[opt_H > M] = float('inf')\n opt_delta_idx[opt_H > M] = max_t_idx\n\n opt_delta = np.full_like(T, float('inf'), dtype='float')\n opt_delta[opt_delta_idx < max_t_idx] = T[opt_delta_idx[opt_delta_idx < max_t_idx]]\n\n d = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_delta), [LinearFunction(0, opt_delta[-1])])))\n h = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_H), [LinearFunction(0, opt_H[-1])])))\n\n return d, h\n\ndef link_charging(T, f, cf, M):\n L = f(T)\n CF = cf(T)\n max_t_idx = np.iinfo(np.dtype('uint32')).max\n opt_H = np.full_like(L, float('inf'))\n opt_delta_idx = np.full_like(L, max_t_idx, dtype='uint32')\n ts = []\n for d_idx in range(len(L)):\n if L[d_idx] >= float('inf'):\n continue\n y = M - L[d_idx]\n if y < 0:\n continue\n assert(y <= M)\n t_idx = np.argmax(CF > y)\n CF_y = np.roll(CF, -t_idx)\n CF_y[-t_idx:] = CF[-1]\n assert(len(CF_y) == len(L))\n CF_d = np.roll(CF_y, d_idx)\n CF_d[:d_idx] = -float('inf')\n H_d = np.maximum(0, M - CF_d)\n index = opt_H > H_d\n opt_H[index] = H_d[index]\n opt_delta_idx[index] = d_idx\n opt_H[opt_H > M] = float('inf')\n opt_delta_idx[opt_H > M] = max_t_idx\n\n print(list(ts))\n\n opt_delta = np.full_like(T, float('inf'), dtype='float')\n opt_delta[opt_delta_idx < max_t_idx] = T[opt_delta_idx[opt_delta_idx < max_t_idx]]\n\n d = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_delta), [LinearFunction(0, opt_delta[-1])])))\n h = PiecewiseFunction(T, np.concatenate((make_piecewise_linear(T, opt_H), [LinearFunction(0, opt_H[-1])])))\n return d, h\n"
] | [
[
"numpy.maximum",
"numpy.dtype",
"numpy.full_like",
"numpy.argmax",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
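Both `link_consumption` and `link_charging` above lean on the same shift-and-mask idiom: `np.roll` by `d` positions, then overwrite the wrapped-around prefix with `inf` so those entries can never win the subsequent minimum. Isolated on a toy array (not the project's piecewise-linear functions), the idiom is:

```python
import numpy as np

def shift_with_inf(arr: np.ndarray, d: int) -> np.ndarray:
    """Shift right by d; wrapped entries become +inf so they lose any min()."""
    out = np.roll(arr, d)
    out[:d] = np.inf
    return out

R = np.array([3.0, 1.0, 4.0, 1.0])
print(shift_with_inf(R, 2))  # [inf, inf, 3., 1.]
```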
NengLu/topopy | [
"df61e8133ca921daf7d9980d122a2afc5e1ad925"
] | [
"test/temp/test_07_Network_stream_poi.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on September 25, 2018\nTesting suite for topopy.Flow.get_stream_poi() function\n@author: J. Vicente Perez\n@email: [email protected]\n@date: September 25, 2018\n\"\"\"\n\nimport unittest\nimport sys\nimport numpy as np\n# Add to the path code folder and data folder\nsys.path.append(\"../\")\nfrom topopy import Flow, DEM, Network\ninfolder = \"data/in\"\n\nclass StreamPoiTest(unittest.TestCase):\n \n# def test_stream_poi_01(self):\n# dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n# for file in dem_files:\n# dem = DEM(infolder + \"/\" + file)\n# fd = Flow(dem)\n# thr = int(fd.get_ncells() * 0.01)\n# net = Network(fd, dem, thr)\n# \n# out01 = fd.get_stream_poi(thr, \"heads\", \"CELL\")\n# out02 = net.get_stream_poi(\"heads\", \"CELL\")\n# \n# computed = np.array_equal(out01, out02)\n# self.assertEqual(computed, True)\n \n def test_stream_poi_02(self):\n dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n for file in dem_files:\n dem = DEM(infolder + \"/\" + file)\n fd = Flow(dem)\n thr = int(fd.get_ncells() * 0.01)\n net = Network(fd, dem, thr)\n \n out01 = fd.get_stream_poi(thr, \"confluences\", \"CELL\")\n out02 = net.get_stream_poi(\"confluences\", \"CELL\")\n \n computed = np.array_equal(out01, out02)\n print(file)\n self.assertEqual(computed, True)\n\n# \n# def test_stream_poi_03(self):\n# dem_files = ['tunez.tif', 'small25.tif', \"jebja30.tif\"]\n# for file in dem_files:\n# dem = DEM(infolder + \"/\" + file)\n# fd = Flow(dem)\n# thr = int(fd.get_ncells() * 0.01)\n# net = Network(fd, dem, thr)\n# \n# out01 = fd.get_stream_poi(thr, \"outlets\", \"CELL\")\n# out02 = net.get_stream_poi(\"outlets\", \"CELL\")\n# \n# computed = np.array_equal(out01, out02)\n# self.assertEqual(computed, True)\n\n\nif __name__ == \"__main__\":\n unittest.main()"
] | [
[
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
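The surviving test above reduces to an exact comparison of two index arrays; `self.assertTrue(computed)` expresses that intent more directly than `assertEqual(computed, True)`. A stub version of the pattern, with literal arrays standing in for the real `Flow`/`Network` outputs:

```python
import unittest
import numpy as np

class ArrayEqualityTest(unittest.TestCase):
    def test_outputs_match(self):
        out01 = np.array([[10, 2], [37, 5]])  # stand-in for fd.get_stream_poi(...)
        out02 = np.array([[10, 2], [37, 5]])  # stand-in for net.get_stream_poi(...)
        self.assertTrue(np.array_equal(out01, out02))

if __name__ == "__main__":
    unittest.main()
```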
Foltrex/bsu | [
"769ddac58eddd5877e40949227998575fd4dec77"
] | [
"architecture/lab3-poisson/task.py"
] | [
"from math import sin\n\nimport numpy as np\nfrom mpi4py import MPI\n\n\nclass Task:\n f = staticmethod(lambda x, y: x * y)\n f_left = f1 = staticmethod(lambda y: y ** 2)\n f_right = f2 = staticmethod(lambda y: sin(y))\n f_bottom = f3 = staticmethod(lambda x: x ** 3)\n f_top = f4 = staticmethod(lambda x: x ** 2)\n\n\nclass Point:\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __repr__(self):\n return '({0:.2f}, {1:.2f})'.format(self.x, self.y)\n\n\nclass Index:\n def __init__(self, rows, rows_start, cols, cols_start):\n self.rows, self.rows_start, self.cols, self.cols_start = rows, rows_start, cols, cols_start\n\n\nclass Region:\n def __init__(self, top=0, right=0, bottom=0, left=0):\n self.top, self.right, self.bottom, self.left = top, right, bottom, left\n\n def __repr__(self):\n return '{' \\\n + 't: {0}, r: {1}, b: {2}, l: {3}'.format(self.top, self.right, self.bottom, self.left) \\\n + '}'\n\n\nclass ProcSquare:\n def __init__(self, full_region, region, left_top, step_x, step_y):\n self.full_region, self.region, self.left_top, self.step_x, self.step_y = full_region, region, left_top, step_x, step_y\n self.rows = region.bottom - region.top\n self.cols = region.right - region.left\n self.calc_region = Region(\n top=int(region.top == full_region.top),\n left=int(region.left == full_region.left),\n right=self.cols - int(region.right == full_region.right),\n bottom=self.rows - int(region.bottom == full_region.bottom)\n )\n self.diff = 0.0\n\n if self.rows > 0 and self.cols > 0:\n self.top_border = np.zeros(self.cols, dtype=np.float64)\n self.left_border = np.zeros(self.rows, dtype=np.float64)\n self.bottom_border = np.zeros(self.cols, dtype=np.float64)\n self.right_border = np.zeros(self.rows, dtype=np.float64)\n\n self.sqr_step_x = self.step_x * self.step_x\n self.sqr_step_y = self.step_y * self.step_y\n self.weight = 1. / (2 * (1. / self.sqr_step_x + 1. 
/ self.sqr_step_y))\n\n if self.region.top == self.full_region.top:\n for j in range(self.cols):\n self.set(0, j, Task.f_top(left_top.x + step_x * j))\n else:\n self.neighbor_top_border = np.zeros(self.cols, dtype=np.float64)\n\n if region.bottom == full_region.bottom:\n for j in range(self.cols):\n self.set(self.rows - 1, j, Task.f_bottom(left_top.x + step_x * j))\n else:\n self.neighbor_bottom_border = np.zeros(self.cols, dtype=np.float64)\n\n if region.left == full_region.left:\n for i in range(self.rows):\n self.set(i, 0, Task.f_left(left_top.y + step_y * i))\n else:\n self.neighbor_left_border = np.zeros(self.rows, dtype=np.float64)\n\n if region.right == full_region.right:\n for i in range(self.rows):\n self.set(i, self.cols - 1, Task.f_right(left_top.y + step_y * i))\n else:\n self.neighbor_right_border = np.zeros(self.rows, dtype=np.float64)\n\n if self.rows > 2 and self.cols > 2:\n self.inner_lines = []\n for i in range(self.rows - 2):\n self.inner_lines.append(np.zeros(self.cols - 2, dtype=np.float64))\n\n def get(self, i, j):\n if j == -1:\n return self.neighbor_left_border[i]\n elif j == self.cols:\n return self.neighbor_right_border[i]\n elif i == -1:\n return self.neighbor_top_border[j]\n elif i == self.rows:\n return self.neighbor_bottom_border[j]\n elif j == 0:\n return self.left_border[i]\n elif j == self.cols - 1:\n return self.right_border[i]\n elif i == 0:\n return self.top_border[j]\n elif i == self.rows - 1:\n return self.bottom_border[j]\n else:\n return self.inner_lines[i - 1][j - 1]\n\n def set(self, i, j, val):\n if j == -1:\n self.neighbor_left_border[i] = val\n elif j == self.cols:\n self.neighbor_right_border[i] = val\n elif i == -1:\n self.neighbor_top_border[j] = val\n elif i == self.rows:\n self.neighbor_bottom_border[j] = val\n else:\n if j == 0:\n self.left_border[i] = val\n\n if j == self.cols - 1:\n self.right_border[i] = val\n\n if i == 0:\n self.top_border[j] = val\n\n if i == self.rows - 1:\n self.bottom_border[j] = val\n\n if (0 < i < self.rows - 1) and (0 < j < self.cols - 1):\n self.inner_lines[i - 1][j - 1] = val\n\n def exch(self, comm):\n left, right = comm.Shift(1, 1)\n top, bottom = comm.Shift(0, 1)\n\n if top != MPI.PROC_NULL:\n comm.send(self.top_border, dest=top)\n\n if bottom != MPI.PROC_NULL:\n self.neighbor_bottom_border = comm.recv(source=bottom)\n\n if bottom != MPI.PROC_NULL:\n comm.send(self.bottom_border, dest=bottom)\n\n if top != MPI.PROC_NULL:\n self.neighbor_top_border = comm.recv(source=top)\n\n if right != MPI.PROC_NULL:\n comm.send(self.right_border, dest=right)\n\n if left != MPI.PROC_NULL:\n self.neighbor_left_border = comm.recv(source=left)\n\n if left != MPI.PROC_NULL:\n comm.send(self.left_border, dest=left)\n\n if right != MPI.PROC_NULL:\n self.neighbor_right_border = comm.recv(source=right)\n\n comm.barrier()\n\n def calc(self):\n self.diff = 0.0\n\n for i in range(self.calc_region.top, self.calc_region.bottom):\n for j in range(self.calc_region.left, self.calc_region.right):\n x = self.left_top.x + j * self.step_x\n y = self.left_top.y + i * self.step_y\n val = self.weight * (\n (self.get(i + 1, j) + self.get(i - 1, j)) / self.sqr_step_x +\n (self.get(i, j + 1) + self.get(i, j - 1)) / self.sqr_step_y -\n Task.f(x, y)\n )\n self.diff = max(self.diff, abs(self.get(i, j) - val))\n self.set(i, j, val=val)\n\n def data(self):\n temp = np.zeros(self.cols * self.rows, dtype=np.float64)\n\n for i in range(self.rows):\n for j in range(self.cols):\n temp[i * self.cols + j] = self.get(i, j)\n\n return temp\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
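`ProcSquare.calc` above is a weighted five-point Jacobi update for the Poisson problem: solving the discretized equation for the center value gives the weight 1/(2(1/hx² + 1/hy²)). A serial NumPy sweep of the same update, assuming a toy grid and source term and omitting the MPI halo exchange:

```python
import numpy as np

def jacobi_sweep(u: np.ndarray, f: np.ndarray, hx: float, hy: float) -> np.ndarray:
    """One Jacobi sweep for u_xx + u_yy = f on interior points (fixed borders)."""
    w = 1.0 / (2.0 * (1.0 / hx**2 + 1.0 / hy**2))
    new = u.copy()
    new[1:-1, 1:-1] = w * (
        (u[1:-1, 2:] + u[1:-1, :-2]) / hx**2    # left/right neighbours
        + (u[2:, 1:-1] + u[:-2, 1:-1]) / hy**2  # top/bottom neighbours
        - f[1:-1, 1:-1]
    )
    return new

u = np.zeros((6, 6)); u[0, :] = 1.0  # toy Dirichlet boundary condition
f = np.zeros_like(u)
for _ in range(50):
    u = jacobi_sweep(u, f, hx=0.2, hy=0.2)
```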
georgiarichards/otc_codeine | [
"a05a6d23b24c250fb0f8cc5367919a12979870c5"
] | [
"figures_otc.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # This notebook graphs the sales and expenditure data of OTC codeine for 31 countries\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n \n#and make the plots appear in the notebook\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# # Import data\n\n# In[2]:\n\n\ndf1 = pd.read_csv(\"/Users/georgiarichards/Desktop/Python/OTC/data_otc_long.csv\")\ndf1.head()\n\n\n# In[3]:\n\n\ndf1.info()\n\n\n# In[4]:\n\n\ndf1.describe()\n\n\n# # Graphing data\n\n# In[5]:\n\n\n# this code increases the size of the figures \nfig_size = plt.rcParams[\"figure.figsize\"]\nfig_size[0] = 10\nfig_size[1] = 6\nplt.rcParams[\"figure.figsize\"] = fig_size\n\n\n# # COUNTS - all items sold \n\n# In[6]:\n\n\n# doseage units sold per 1000 of the population in 31 countries - adjusted using IQVIA coverage\nfig1a = sns.lineplot(data=df1, \n x='year', y='countpopadj', \n hue='Country',\n palette=\"bright\",\n marker='o') \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 12)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig1a.png')\nfig1a\n\n\n# In[7]:\n\n\n# Now I drop countries ranked 9-31 to graph the top 8 countries for sales volumes\ndf2 = df1\ndf2 = df2.set_index(\"country2\")\ndf2 = df2.drop(\"Serbia\")\ndf2 = df2.drop(\"Switzerland\")\ndf2 = df2.drop(\"Estonia\")\ndf2 = df2.drop(\"Netherlands\")\ndf2 = df2.drop(\"Finland\")\ndf2 = df2.drop(\"Romania\")\ndf2 = df2.drop(\"Bulgaria\")\ndf2 = df2.drop(\"Slovakia\")\ndf2 = df2.drop(\"Slovenia\")\ndf2 = df2.drop(\"Lithuania\")\ndf2 = df2.drop(\"Belgium\")\ndf2 = df2.drop(\"Mexico\")\ndf2 = df2.drop(\"Russia\")\ndf2 = df2.drop(\"Canada\")\ndf2 = df2.drop(\"USA\")\ndf2 = df2.drop(\"Greece\")\ndf2 = df2.drop(\"Thailand\")\ndf2 = df2.drop(\"Germany\")\ndf2 = df2.drop(\"Argentina\")\ndf2 = df2.drop(\"Italy\")\ndf2 = df2.drop(\"Portugal\")\ndf2 = df2.drop(\"Brazil\")\ndf2 = df2.drop(\"Spain\")\ndf2.head()\n\n\n# In[8]:\n\n\n# graphing the top 8 countries [by mean sales of OTC codeine] - adjusted using IQVIA coverage\n\nplt.figure(figsize=(10,6))\n\nfig2a = sns.lineplot(data=df2, \n x=\"year\", y=\"countpopadj\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \n\n\nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig2a.png')\nfig2a\n\n\n# In[9]:\n\n\n# Now I drop countries ranked 1-8 and 17-31 to graph the next 8 countries for sales volumes\ndf3 = df1 \ndf3 = df3.set_index(\"country2\")\ndf3 = df3.drop(\"South Africa\")\ndf3 = df3.drop(\"Ireland\")\ndf3 = df3.drop(\"France\")\ndf3 = df3.drop(\"UK\")\ndf3 = df3.drop(\"Latvia\")\ndf3 = df3.drop(\"Japan\")\ndf3 = df3.drop(\"Croatia\")\ndf3 = df3.drop(\"Poland\")\ndf3 = df3.drop(\"Slovenia\")\ndf3 = df3.drop(\"Lithuania\")\ndf3 = df3.drop(\"Belgium\")\ndf3 = df3.drop(\"Mexico\")\ndf3 = df3.drop(\"Russia\")\ndf3 = df3.drop(\"Canada\")\ndf3 = df3.drop(\"USA\")\ndf3 = df3.drop(\"Greece\")\ndf3 = df3.drop(\"Thailand\")\ndf3 = df3.drop(\"Germany\")\ndf3 = df3.drop(\"Argentina\")\ndf3 = df3.drop(\"Italy\")\ndf3 = df3.drop(\"Portugal\")\ndf3 = df3.drop(\"Brazil\")\ndf3 = df3.drop(\"Spain\")\ndf3.head()\n\n\n# In[10]:\n\n\n# graphing countries ranked 9-16 for mean volume sales of OTC codeine - adjusted with IQVIA coverage\nfig2b = sns.lineplot(data=df3, \n x=\"year\", y=\"countpopadj\", \n hue=\"Country\", 
palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig2b.png')\nfig2b\n\n\n# In[11]:\n\n\n# Now I drop countries ranked 1-16 and -31 to graph the next 8 countries for sales volumes\ndf4 = df1 \ndf4 = df4.set_index(\"country2\")\ndf4 = df4.drop(\"South Africa\")\ndf4 = df4.drop(\"Ireland\")\ndf4 = df4.drop(\"France\")\ndf4 = df4.drop(\"UK\")\ndf4 = df4.drop(\"Latvia\")\ndf4 = df4.drop(\"Japan\")\ndf4 = df4.drop(\"Croatia\")\ndf4 = df4.drop(\"Poland\")\ndf4 = df4.drop(\"Serbia\")\ndf4 = df4.drop(\"Switzerland\")\ndf4 = df4.drop(\"Estonia\")\ndf4 = df4.drop(\"Netherlands\")\ndf4 = df4.drop(\"Finland\")\ndf4 = df4.drop(\"Romania\")\ndf4 = df4.drop(\"Bulgaria\")\ndf4 = df4.drop(\"Slovakia\")\ndf4 = df4.drop(\"Thailand\")\ndf4 = df4.drop(\"Germany\")\ndf4 = df4.drop(\"Argentina\")\ndf4 = df4.drop(\"Italy\")\ndf4 = df4.drop(\"Portugal\")\ndf4 = df4.drop(\"Brazil\")\ndf4 = df4.drop(\"Spain\")\ndf4.head()\n\n\n# In[12]:\n\n\n# graphing countries ranked 17-25 for mean volume sales of OTC codeine \nfig3 = sns.lineplot(data=df4, \n x=\"year\", y=\"countpop\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted dosage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig3.png')\nfig3\n\n\n# In[13]:\n\n\n# Now I drop countries for the last 8\ndf5 = df1 \ndf5 = df5.set_index(\"country2\")\ndf5 = df5.drop(\"South Africa\")\ndf5 = df5.drop(\"Ireland\")\ndf5 = df5.drop(\"France\")\ndf5 = df5.drop(\"UK\")\ndf5 = df5.drop(\"Latvia\")\ndf5 = df5.drop(\"Japan\")\ndf5 = df5.drop(\"Croatia\")\ndf5 = df5.drop(\"Poland\")\ndf5 = df5.drop(\"Serbia\")\ndf5 = df5.drop(\"Switzerland\")\ndf5 = df5.drop(\"Estonia\")\ndf5 = df5.drop(\"Netherlands\")\ndf5 = df5.drop(\"Finland\")\ndf5 = df5.drop(\"Romania\")\ndf5 = df5.drop(\"Bulgaria\")\ndf5 = df5.drop(\"Slovakia\")\ndf5 = df5.drop(\"Slovenia\")\ndf5 = df5.drop(\"Lithuania\")\ndf5 = df5.drop(\"Belgium\")\ndf5 = df5.drop(\"Mexico\")\ndf5 = df5.drop(\"Russia\")\ndf5 = df5.drop(\"Canada\")\ndf5 = df5.drop(\"USA\")\ndf5 = df5.drop(\"Greece\")\ndf5.head()\n\n\n# In[14]:\n\n\n# graphing countries ranked 9-16 for mean volume sales of OTC codeine \nfig4 = sns.lineplot(data=df5, \n x=\"year\", y=\"countpop\", \n hue=\"Country\", palette=\"bright\",\n style=\"Country\",\n markers=True, dashes=False) \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted doseage units per 1000 population\", fontsize= 15)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig4.png')\nfig4\n\n\n# # Public expenditure \n\n# In[15]:\n\n\n# this graphs the public expenditure for all 31 countries - adjusted with IQVIA coverage\nfig5 = sns.lineplot(data=df1, \n x='year', y='costpopadj', \n hue='Country',\n palette=\"bright\",\n marker=\"o\") \nplt.xlabel(\" \")\nplt.ylabel(\"Adjusted public expenditure (£) per 1,000 population\", fontsize= 12)\nplt.legend(loc='upper right', bbox_to_anchor=(1.2, 1))\nplt.savefig('fig5.png')\nfig5\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
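The notebook above builds each country subset by dropping the complement one `df.drop` call at a time. Assuming the intended rule is "rank countries by mean adjusted sales", the same subsets can be computed instead of hand-listed; `country2` and `countpopadj` are the notebook's own columns, the ranking rule is an assumption:

```python
import pandas as pd

def top_n_by_mean(df: pd.DataFrame, value_col: str = "countpopadj",
                  n: int = 8) -> pd.DataFrame:
    """Keep rows for the n countries with the highest mean of value_col."""
    ranking = df.groupby("country2")[value_col].mean().nlargest(n)
    return df[df["country2"].isin(ranking.index)]

# With df1 as loaded in the notebook:
# df2 = top_n_by_mean(df1, n=8)                       # countries ranked 1-8
# ranked = df1.groupby("country2")["countpopadj"].mean().sort_values(ascending=False)
# df3 = df1[df1["country2"].isin(ranked.index[8:16])]  # countries ranked 9-16
```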
marcdemers/pytorch_geometric_temporal | [
"2c99d690cf183e6c9e7ff40d15ba2f8b875c1aaf"
] | [
"torch_geometric_temporal/nn/recurrent/gconv_gru.py"
] | [
"import torch\nfrom torch_geometric.nn import ChebConv\n\n\nclass GConvGRU(torch.nn.Module):\n r\"\"\"An implementation of the Chebyshev Graph Convolutional Gated Recurrent Unit\n Cell. For details see this paper: `\"Structured Sequence Modeling with Graph\n Convolutional Recurrent Networks.\" <https://arxiv.org/abs/1612.07659>`_\n\n Args:\n in_channels (int): Number of input features.\n out_channels (int): Number of output features.\n K (int): Chebyshev filter size :math:`K`.\n normalization (str, optional): The normalization scheme for the graph\n Laplacian (default: :obj:`\"sym\"`):\n\n 1. :obj:`None`: No normalization\n :math:`\\mathbf{L} = \\mathbf{D} - \\mathbf{A}`\n\n 2. :obj:`\"sym\"`: Symmetric normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1/2} \\mathbf{A}\n \\mathbf{D}^{-1/2}`\n\n 3. :obj:`\"rw\"`: Random-walk normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1} \\mathbf{A}`\n\n You need to pass :obj:`lambda_max` to the :meth:`forward` method of\n this operator in case the normalization is non-symmetric.\n :obj:`\\lambda_max` should be a :class:`torch.Tensor` of size\n :obj:`[num_graphs]` in a mini-batch scenario and a\n scalar/zero-dimensional tensor when operating on single graphs.\n You can pre-compute :obj:`lambda_max` via the\n :class:`torch_geometric.transforms.LaplacianLambdaMax` transform.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n \"\"\"\n def __init__(self, in_channels: int, out_channels: int, K: int,\n normalization: str=\"sym\", bias: bool=True):\n super(GConvGRU, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.K = K\n self.normalization = normalization\n self.bias = bias\n self._create_parameters_and_layers()\n\n\n def _create_update_gate_parameters_and_layers(self):\n\n self.conv_x_z = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_z = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_reset_gate_parameters_and_layers(self):\n\n self.conv_x_r = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_r = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_candidate_state_parameters_and_layers(self):\n\n self.conv_x_h = ChebConv(in_channels=self.in_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n self.conv_h_h = ChebConv(in_channels=self.out_channels,\n out_channels=self.out_channels,\n K=self.K,\n normalization=self.normalization,\n bias=self.bias)\n\n\n def _create_parameters_and_layers(self):\n self._create_update_gate_parameters_and_layers()\n self._create_reset_gate_parameters_and_layers()\n self._create_candidate_state_parameters_and_layers()\n\n\n def _set_hidden_state(self, X, H):\n if H is None:\n H = torch.zeros(X.shape[0], self.out_channels)\n return H\n\n\n def _calculate_update_gate(self, X, edge_index, edge_weight, H):\n Z = self.conv_x_z(X, edge_index, edge_weight)\n Z = Z + self.conv_h_z(H, edge_index, edge_weight)\n Z = torch.sigmoid(Z)\n return Z\n\n\n def _calculate_reset_gate(self, X, edge_index, edge_weight, H):\n R = 
self.conv_x_r(X, edge_index, edge_weight)\n R = R + self.conv_h_r(H, edge_index, edge_weight)\n R = torch.sigmoid(R) \n return R\n\n\n def _calculate_candidate_state(self, X, edge_index, edge_weight, H, R):\n H_tilde = self.conv_x_h(X, edge_index, edge_weight)\n H_tilde = H_tilde + self.conv_h_h(H*R, edge_index, edge_weight)\n H_tilde = torch.tanh(H_tilde)\n return H_tilde\n\n\n def _calculate_hidden_state(self, Z, H, H_tilde):\n H = Z*H + (1-Z)*H_tilde\n return H\n\n\n def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor,\n edge_weight: torch.FloatTensor=None, H: torch.FloatTensor=None) -> torch.FloatTensor:\n \"\"\"\n Making a forward pass. If edge weights are not present the forward pass\n defaults to an unweighted graph. If the hidden state matrix is not present\n when the forward pass is called it is initialized with zeros.\n\n Arg types:\n * **X** *(PyTorch Float Tensor)* - Node features.\n * **edge_index** *(PyTorch Long Tensor)* - Graph edge indices.\n * **edge_weight** *(PyTorch Long Tensor, optional)* - Edge weight vector.\n * **H** *(PyTorch Float Tensor, optional)* - Hidden state matrix for all nodes.\n\n Return types:\n * **H** *(PyTorch Float Tensor)* - Hidden state matrix for all nodes.\n \"\"\"\n H = self._set_hidden_state(X, H)\n Z = self._calculate_update_gate(X, edge_index, edge_weight, H)\n R = self._calculate_reset_gate(X, edge_index, edge_weight, H)\n H_tilde = self._calculate_candidate_state(X, edge_index, edge_weight, H, R)\n H = self._calculate_hidden_state(Z, H, H_tilde)\n return H\n"
] | [
[
"torch.tanh",
"torch.sigmoid",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
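Stripped of the Chebyshev graph convolutions, `GConvGRU.forward` is the standard GRU recurrence: update gate, reset gate, candidate state, then a convex combination. A plain-tensor sketch of just those gating equations, with `torch.nn.Linear` layers standing in for each `ChebConv` pair:

```python
import torch

class TinyGRUCell(torch.nn.Module):
    """GRU gating as in GConvGRU, with Linear layers replacing ChebConv."""
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.x_z = torch.nn.Linear(in_channels, out_channels)
        self.h_z = torch.nn.Linear(out_channels, out_channels)
        self.x_r = torch.nn.Linear(in_channels, out_channels)
        self.h_r = torch.nn.Linear(out_channels, out_channels)
        self.x_h = torch.nn.Linear(in_channels, out_channels)
        self.h_h = torch.nn.Linear(out_channels, out_channels)

    def forward(self, X, H=None):
        if H is None:
            H = torch.zeros(X.shape[0], self.x_z.out_features)
        Z = torch.sigmoid(self.x_z(X) + self.h_z(H))         # update gate
        R = torch.sigmoid(self.x_r(X) + self.h_r(H))         # reset gate
        H_tilde = torch.tanh(self.x_h(X) + self.h_h(H * R))  # candidate state
        return Z * H + (1 - Z) * H_tilde                     # new hidden state

cell = TinyGRUCell(4, 8)
H = cell(torch.randn(3, 4))  # 3 nodes, 4 input features -> hidden size 8
```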
Exorust/Discrete-Event-Simulation | [
"1d5d43c88521db7c0e010966f6df685256492d71"
] | [
"Process Generation Script.py"
] | [
"import numpy as np\n# exp_dist <-> f(x, beta) = (1/beta) * exp(-(1/beta) * x)\nbeta = 10\nprocess_count = 20\na=np.round(np.random.exponential(scale=beta, size=(process_count,2)))\nnp.savetxt(\"process.csv\", a, delimiter=\",\",fmt=\"%i\")\n\n# Generates Arrival time and burst time\n"
] | [
[
"numpy.savetxt",
"numpy.random.exponential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
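The generation script above draws 20 (arrival time, burst time) pairs from an exponential distribution with scale β = 10 and writes them as integers. A quick round-trip sanity check of that file, using `default_rng` as an assumed modernization of the global NumPy RNG:

```python
import numpy as np

rng = np.random.default_rng(42)
a = np.round(rng.exponential(scale=10, size=(20, 2)))
np.savetxt("process.csv", a, delimiter=",", fmt="%i")

back = np.loadtxt("process.csv", delimiter=",")
assert back.shape == (20, 2)
print(back.mean(axis=0))  # column means should be near beta = 10
```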
waterahr/HR-Net | [
"52f8d9d8837fca1307aff4df4ed676cab2bb296a",
"52f8d9d8837fca1307aff4df4ed676cab2bb296a",
"52f8d9d8837fca1307aff4df4ed676cab2bb296a"
] | [
"src/network/hiarBayesGoogLenet_gap_v4.py",
"src/train_berk.py",
"src/extract_feature.py"
] | [
"import sys\nsys.path.append(\"..\")\nimport os\nfrom keras.models import Model\nfrom keras.layers import Activation, Input, Flatten, Dense, Dropout, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, Lambda\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.layers.recurrent import LSTM\nfrom keras.utils import plot_model\n#from spp.spp.SpatialPyramidPooling import SpatialPyramidPooling\nimport keras.backend as K\nimport numpy as np\nfrom keras.models import Sequential\n\n\n\nclass hiarBayesGoogLeNet:\n @staticmethod\n def Conv2d_BN(x, nb_filter, kernel_size, padding='same', strides=(1,1), name=None):\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name\n else:\n bn_name = None\n conv_name = None\n \n x = Conv2D(nb_filter, kernel_size, padding=padding, strides=strides, activation='relu', name=conv_name)(x)\n x = BatchNormalization(axis=3, name=bn_name)(x)\n return x\n \n @staticmethod\n def Inception(x, nb_filter, name=None):\n \"\"\"\n branch1x1 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n \n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(branch3x3, nb_filter,(3,3), padding='same', strides=(1,1), name=name)\n \n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter, (1,1), padding='same', strides=(1,1),name=name)\n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(branch5x5, nb_filter, (5,5), padding='same', strides=(1,1), name=name)\n \n branchpool = MaxPooling2D(pool_size=(3,3), strides=(1,1), padding='same')(x)\n branchpool = hiarBayesGoogLeNet.Conv2d_BN(branchpool, nb_filter, (1,1), padding='same', strides=(1,1), name=name)\n \"\"\"\n branch1x1 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[0], (1,1), padding='same', strides=(1,1), name=name+'_1x1')\n \n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[1], (1,1), padding='same', strides=(1,1), name=name+'_3x3_reduce')\n branch3x3 = hiarBayesGoogLeNet.Conv2d_BN(branch3x3, nb_filter[2],(3,3), padding='same', strides=(1,1), name=name+'_3x3')\n \n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(x, nb_filter[3], (1,1), padding='same', strides=(1,1),name=name+'5x5_reduce')\n branch5x5 = hiarBayesGoogLeNet.Conv2d_BN(branch5x5, nb_filter[4], (5,5), padding='same', strides=(1,1), name=name+'_5x5')\n \n branchpool = MaxPooling2D(pool_size=(3,3), strides=(1,1), padding='same')(x)\n branchpool = hiarBayesGoogLeNet.Conv2d_BN(branchpool, nb_filter[5], (1,1), padding='same', strides=(1,1), name=name+'_pool_proj')\n \n x = concatenate([branch1x1, branch3x3, branch5x5, branchpool], axis=3)\n \n return x\n\n \"\"\"\n @staticmethod\n def SPP(x, pooling_regions):\n dim_ordering = K.image_dim_ordering()\n assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'\n if dim_ordering == 'th':\n input_shape = (num_channels, None, None)\n elif dim_ordering == 'tf':\n input_shape = (None, None, num_channels)\n model = Sequential()\n model.add(SpatialPyramidPooling(pooling_regions, input_shape=input_shape))\n \n return model.predict(x)\n \"\"\"\n \n\n @staticmethod\n def build(width, height, depth, classes, pooling_regions = [1, 3], weights=\"imagenet\"):\n assert(isinstance(classes, list), 'Must be list type.')\n assert(len(classes) == 3, 'Must be 3 elements in the list.')\n inpt = Input(shape=(width, height, depth))\n #padding = 'same',填充为(步长-1)/2,还可以用ZeroPadding2D((3,3))\n x = hiarBayesGoogLeNet.Conv2d_BN(inpt, 
64, (7,7), strides=(2,2), padding='same', name=\"conv1_7x7_s2\")\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n x = hiarBayesGoogLeNet.Conv2d_BN(x, 192, (3,3), strides=(1,1), padding='same', name=\"conv2_3x3\")\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 64, name=\"inception_3a\")#256\n x = hiarBayesGoogLeNet.Inception(x, 120, name=\"inception_3b\")#480\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [64,96,128,16,32,32], name=\"inception_3a\")#256\n x = hiarBayesGoogLeNet.Inception(x, [128,128,192,32,96,64], name=\"inception_3b\")#480\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4a\")#512\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4b\")\n x = hiarBayesGoogLeNet.Inception(x, 128, name=\"inception_4c\")\n x = hiarBayesGoogLeNet.Inception(x, 132, name=\"inception_4d\")#528\n x = hiarBayesGoogLeNet.Inception(x, 208, name=\"inception_4e\")#832\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [192,96,208,16,48,64], name=\"inception_4a\")#512\n fea_low = x\n #fea_low = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv1_e')(x)\n #fea_low = GlobalAveragePooling2D()(x)#, name=\"gap_low\"\n #fea_low = Dense(512, activation='relu')(fea_low)\n x = hiarBayesGoogLeNet.Inception(x, [160,112,224,24,64,64], name=\"inception_4b\")\n x = hiarBayesGoogLeNet.Inception(x, [128,128,256,24,64,64], name=\"inception_4c\")\n x = hiarBayesGoogLeNet.Inception(x, [112,144,288,32,64,64], name=\"inception_4d\")#528\n fea_mid = x\n #fea_mid = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv2_e')(x)\n #fea_mid = GlobalAveragePooling2D()(x)#, name=\"gap_mid\"\n #fea_mid = Dense(512, activation='relu')(fea_mid)\n x = hiarBayesGoogLeNet.Inception(x, [256,160,320,32,128,128], name=\"inception_4e\")#832\n x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, 208, name=\"inception_5a\")\n x = hiarBayesGoogLeNet.Inception(x, 256, name=\"inception_5b\")#1024\n \"\"\"\n x = hiarBayesGoogLeNet.Inception(x, [256,160,320,32,128,128], name=\"inception_5a\")\n x = hiarBayesGoogLeNet.Inception(x, [384,192,384,48,128,128], name=\"inception_5b\")#1024\n fea_hig = x\n #fea_hig = Conv2D(1024, (3, 3), padding='same', activation='relu', name='conv3_e')(x)\n #fea_hig = GlobalAveragePooling2D()(x)#, name=\"gap_hig\"\n #fea_hig = Dense(1024, activation='relu')(fea_hig)\n \"\"\"\n predictions_low = Dense(classes[0], name=\"low\", activation=\"sigmoid\")(fea_low)#\n predictions_mid_hs = Dense(classes[1], name=\"middle_hs\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_ub = Dense(classes[2], name=\"middle_ub\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_lb = Dense(classes[3], name=\"middle_lb\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_sh = Dense(classes[4], name=\"middle_sh\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_at = Dense(classes[5], name=\"middle_at\", activation=\"sigmoid\")(fea_mid)#\n predictions_mid_ot = Dense(classes[6], name=\"middle_ot\", activation=\"sigmoid\")(fea_mid)#\n predictions_hig = Dense(classes[7], name=\"high_fea\", activation=\"sigmoid\")(fea_hig)#\n \"\"\"\n fea_low = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_low)\n #fea_low = Flatten()(fea_low)\n #fea_low = Dense(512, activation='relu')(fea_low)\n fea_low = GlobalAveragePooling2D()(fea_low)\n predictions_low = 
Dense(classes[0], name=\"low\", activation=\"sigmoid\")(fea_low)#\n fea_mid_ub = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_ub = Flatten()(fea_mid_ub)\n #fea_mid_ub = Dense(512, activation='relu')(fea_mid_ub)\n fea_mid_ub = GlobalAveragePooling2D()(fea_mid_ub)\n predictions_mid_ub = Dense(classes[1], name=\"middle_ub\", activation=\"sigmoid\")(fea_mid_ub)#\n fea_mid_lb = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_lb = Flatten()(fea_mid_lb)\n #fea_mid_lb = Dense(512, activation='relu')(fea_mid_lb)\n fea_mid_lb = GlobalAveragePooling2D()(fea_mid_lb)\n predictions_mid_lb = Dense(classes[2], name=\"middle_lb\", activation=\"sigmoid\")(fea_mid_lb)#\n fea_mid_sh = Conv2D(512, (3, 3), padding='same', activation='relu')(fea_mid)\n #fea_mid_sh = Flatten()(fea_mid_sh)\n #fea_mid_sh = Dense(512, activation='relu')(fea_mid_sh)\n fea_mid_sh = GlobalAveragePooling2D()(fea_mid_sh)\n predictions_mid_sh = Dense(classes[3], name=\"middle_sh\", activation=\"sigmoid\")(fea_mid_sh)#\n #fea_mid_ot = Flatten()(fea_mid_ot)\n #fea_mid_ot = Dense(512, activation='relu')(fea_mid_ot)\n #fea_mid_ot = GlobalAveragePooling2D()(fea_mid_ot)\n #predictions_mid_ot = Dense(classes[6], name=\"middle_ot\", activation=\"sigmoid\")(fea_mid_ot)#\n fea_hig = Conv2D(1024, (3, 3), padding='same', activation='relu')(fea_hig)\n #fea_hig = Flatten()(fea_hig)\n #fea_hig = Dense(512, activation='relu')(fea_hig)\n fea_hig = GlobalAveragePooling2D()(fea_hig)\n predictions_hig = Dense(classes[4], name=\"high_fea\", activation=\"sigmoid\")(fea_hig)\n #\"\"\"\n \"\"\"PCM2018\"\"\"\n #predictions_hig = Dense(classes[2], activation=\"sigmoid\", name=\"high\")(concatenate([fea_low, fea_mid, fea_hig], axis=1))\n \"\"\"PCM2018\"\"\"\n predictions_priori = concatenate([predictions_low, predictions_mid_ub, predictions_mid_lb, predictions_mid_sh], axis=1)\n \"\"\"mar\"\"\"\n #val = np.load(\"../results/state_transition_matrix.npy\")\n #state_transition_matrix = K.variable(value=val, dtype='float32', name='state_transition_matrix')\n #predictions_hig_cond = Lambda(lambda x:K.dot(x, state_transition_matrix), name=\"high_cond\")(predictions_priori)\n \"\"\"mar\"\"\"\n predictions_hig_cond = Dense(classes[4], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n #predictions_priori = K.reshape(concatenate([predictions_low, predictions_mid], axis=1), (-1, classes[0]+classes[1], 1))\n #predictions_hig_cond = LSTM(classes[2], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n predictions_hig_posterior = Lambda(lambda x:x[1] * x[0], name=\"high\")([predictions_hig_cond, predictions_hig])\n #predictions_hig_posterior = Lambda(lambda x:K.sigmoid(K.tanh((x[1] - 0.5) * np.pi) * x[0]), name=\"high\")([predictions_hig_cond, predictions_hig])\n #multi#Lambda(lambda x:x[0] * x[1], name=\"high_post\")([predictions_hig_cond, predictions_hig])\n #cond#Dense(classes[2], activation=\"sigmoid\", name=\"high_post\")(concatenate([predictions_hig, predictions_hig_cond], axis=1))\n #add#Lambda(lambda x:(x[0] + x[1])/2, name=\"high_post\")([predictions_hig_cond, predictions_hig])\n \"\"\"mar\"\"\"\n #predictions_low = Activation(\"sigmoid\")(predictions_low)\n #predictions_mid = Activation(\"sigmoid\")(predictions_mid)\n #predictions_hig_posterior = Activation(\"sigmoid\")(predictions_hig_posterior)\n \"\"\"mar\"\"\"\n #predictions = concatenate([predictions_low, predictions_mid, predictions_hig_posterior], axis=1)\n \"\"\"PCM2018\"\"\"\n #predictions = concatenate([predictions_low, 
predictions_mid, predictions_hig], axis=1)\n \"\"\"PCM2018\"\"\"\n \"\"\"\n predictions_low = Dense(classes[0], activation=\"sigmoid\", name=\"low\")(fea_low)\n predictions_mid_fea = Dense(classes[1], activation=\"sigmoid\", name=\"middle_fea\")(fea_mid)\n predictions_mid_cond = Dense(classes[1], activation=\"sigmoid\", name=\"middle_cond\")(predictions_low)\n predictions_mid = Lambda(lambda x:(x[0] + x[1])/2, name=\"mid\")([predictions_mid_fea, predictions_mid_cond])\n predictions_hig_fea = Dense(classes[2], activation=\"sigmoid\", name=\"high_fea\")(fea_hig)\n predictions_priori = concatenate([predictions_low, predictions_mid], axis=1)\n predictions_hig_cond = Dense(classes[2], activation=\"sigmoid\", name=\"high_cond\")(predictions_priori)\n predictions_hig = Lambda(lambda x:(x[0] + x[1])/2, name=\"high_post\")([predictions_hig_cond, predictions_hig_fea])\n predictions = concatenate([predictions_low, predictions_mid, predictions_hig], axis=1)\n \"\"\"\n \"\"\"\n x = concatenate([spp_low, spp_mid, spp_hig], axis=1)#2048\n #x = AveragePooling2D(pool_size=(7,7), strides=(7,7), padding='same')(x)\n x = Dropout(0.4)(x)\n x = Dense(2048, activation='relu')(x)\n x = Dense(classes, activation='softmax')(x)\n \"\"\"\n # create the model\n model = Model(inpt, [predictions_low, predictions_mid_ub, predictions_mid_lb, predictions_mid_sh, predictions_hig_posterior], name='inception')\n if weights == \"imagenet\":\n weights = np.load(\"../results/googlenet_weights.npy\", encoding='latin1').item()\n for layer in model.layers:\n if layer.get_weights() == []:\n continue\n #weight = layer.get_weights()\n if layer.name in weights:\n #print(layer.name, end=':')\n #print(layer.get_weights()[0].shape == weights[layer.name]['weights'].shape, end=' ')\n #print(layer.get_weights()[1].shape == weights[layer.name]['biases'].shape)\n layer.set_weights([weights[layer.name]['weights'], weights[layer.name]['biases']])\n # return the constructed network architecture\n return model\n\nif __name__ == \"__main__\":\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n model = hiarBayesGoogLeNet.build(160, 75, 3, [10, 20, 30, 40, 50])# five class counts (low, middle_ub, middle_lb, middle_sh, high); GoogLeNet by default expects 32*32 input images\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.summary()\n plot_model(model, to_file=\"../../results/models/hiarBayesGoogleLenet.png\", show_shapes=True)",
"\"\"\"\npython train_berk.py -g 1 -c 9 -b 32 -m GoogLeNet\n\"\"\"\nfrom network.GoogLenetSPP import GoogLeNetSPP\nfrom network.GoogleLenet import GoogLeNet\nfrom network.OEDC_GoogLenetSPP import OEDCGoogLeNetSPP\nfrom network.OEDC_GoogLenetSPP_lowerBody import OEDCGoogLeNetSPP_lowerBody\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nfrom keras.utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping, TensorBoard, CSVLogger\nimport sys\nimport os\nimport argparse\nimport json\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom keras import backend as K\nfrom angular_losses import weighted_categorical_crossentropy, coarse_to_fine_categorical_crossentropy_lowerbody\n\nalpha = []\n\ndef parse_arg():\n models = ['GoogLeNet', 'GoogLeNetSPP', 'OEDCGoogLeNetSPP', 'OEDCGoogLeNetSPP_lowerBody']\n parser = argparse.ArgumentParser(description='training of the WPAL...')\n parser.add_argument('-g', '--gpus', type=str, default='',\n help='The gpu device\\'s ID need to be used')\n parser.add_argument('-c', '--classes', type=int, default=9,\n help='The total number of classes to be predicted')\n parser.add_argument('-b', '--batch', type=int, default=32,\n help='The batch size of the training process')\n parser.add_argument('-wd', '--width', type=int, default=420,\n help='The width of thWPAL_berke picture')\n parser.add_argument('-hg', '--height', type=int, default=210,\n help='The height of the picture')\n parser.add_argument('-w', '--weight', type=str, default='',\n help='The weights file of the pre-training')\n parser.add_argument('-m', '--model', type=str, default='',\n help='The model including: '+str(models))\n parser.add_argument('-d', '--depth', type=int, default=9,\n help='The model depth')\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus\n return args\n\n\n \n\n\nif __name__ == \"__main__\":\n args = parse_arg()\n save_name = \"binary9-depth\" + str(args.depth)\n class_num = args.classes\n alpha = np.zeros((class_num,))\n\n\n # Data augmentation to pre-processing\n heavy_augmentation = True\n if heavy_augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=45,\n width_shift_range=0.25,\n height_shift_range=0.25,\n horizontal_flip=True,\n vertical_flip=False,\n zoom_range=0.5,\n channel_shift_range=0.5,\n fill_mode='nearest')\n else:\n datagen = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=0,\n width_shift_range=0.125,\n height_shift_range=0.125,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode='nearest')\n image_width = args.width\n image_height = args.height\n if args.model == \"GoogLeNetSPP\":\n filename = r\"../results/berk.csv\"\n elif args.model == \"GoogLeNet\":\n filename = r\"../results/berk.csv\"\n elif args.model == \"OEDCGoogLeNetSPP\":\n filename = r\"../results/berk_coarse_to_fine_labels_pd.csv\"\n elif args.model == \"OEDCGoogLeNetSPP_lowerBody\":\n filename = r\"../results/berk_lowerBody_labels_pd.csv\"\n data = np.array(pd.read_csv(filename))[:, 1:]\n length = len(data)\n #global alpha\n data_x = np.zeros((length, image_width, image_height, 3))\n data_y = 
np.zeros((length, class_num))\n for i in range(length):\n #img = image.load_img(path + m)\n img = image.load_img(data[i, 0], target_size=(image_width, image_height, 3))\n data_x[i] = image.img_to_array(img)\n data_y[i] = np.array(data[i, 1:1+class_num], dtype=\"float32\")\n for i in range(class_num):\n alpha[i] += list(data_y[:, i]).count(1.0)\n alpha /= length\n print(\"The positive ratio of each attribute is:\\n\", alpha)\n X_train, X_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.3, random_state=0)\n print(\"The shape of the X_train is: \", X_train.shape)\n print(\"The shape of the y_train is: \", y_train.shape)\n print(\"The shape of the X_test is: \", X_test.shape)\n print(\"The shape of the y_test is: \", y_test.shape)\n #np.save(\"../results/\" + args.model + '_' + save_name + \"_X_test.npy\", X_test)\n #np.save(\"../results/\" + args.model + '_' + save_name + \"_y_test.npy\", y_test)\n \n \n # GoogLeNet by default expects 32*32 input images\n if args.model == \"GoogLeNetSPP\":\n model = GoogLeNetSPP.build(None, None, 3, class_num)\n loss_func = 'binary_crossentropy'\n loss_weights = None\n metrics=['accuracy']\n elif args.model == \"GoogLeNet\":\n model = GoogLeNet.build(image_width, image_height, 3, class_num, model_depth=args.depth)\n loss_func = 'binary_crossentropy'\n loss_weights = None\n metrics=['accuracy']\n elif args.model == \"OEDCGoogLeNetSPP\":\n model = OEDCGoogLeNetSPP.build(None, None, 3, 7, [3, 7, 11, 6, 7, 12, 15])#[4, 7, 11, 7, 7, 13, 16]\n loss_func = 'binary_crossentropy'\n loss_weights = None\n metrics=['accuracy']\n elif args.model == \"OEDCGoogLeNetSPP_lowerBody\":\n model = OEDCGoogLeNetSPP_lowerBody.build(None, None, 3, 2, 7, 5)\n loss_func = 'binary_crossentropy'#coarse_to_fine_categorical_crossentropy_lowerbody(alpha)#['categorical_crossentropy', lambda y_true,y_pred: y_pred]\n loss_weights=None#[1.,1.]\n metrics={'softmax_labels':'accuracy'}\n gpus_num = len(args.gpus.split(','))\n if gpus_num != 1:\n model = multi_gpu_model(model, gpus=gpus_num)\n #model.compile(loss=\"categorical_crossentropy\", optimizer='adam', metrics=['accuracy'])\n model.compile(loss=loss_func, optimizer='adam', loss_weights=loss_weights, metrics=metrics)\n model.summary()\n\n\n nb_epoch = 100\n batch_size = args.batch\n train_generator = datagen.flow(X_train, y_train, batch_size=batch_size)\n val_generator = datagen.flow(X_test, y_test, batch_size=batch_size)\n monitor = 'val_loss'\n if args.model == \"GoogLeNetSPP\":\n model_dir = 'GoogLeNetSPP_berk'\n elif args.model == \"GoogLeNet\":\n model_dir = 'GoogLeNet_berk'\n elif args.model == \"OEDCGoogLeNetSPP\":\n model_dir = 'OEDCWPAL_berk'\n elif args.model == \"OEDCGoogLeNetSPP_lowerBody\":\n model_dir = 'OEDCWPAL_berk_lowerBody'\n checkpointer = ModelCheckpoint(filepath = '../models/imagenet_models/' + model_dir + '/' + save_name+ '_epoch{epoch:02d}_valloss{'+ monitor + ':.2f}.hdf5',\n monitor = monitor,\n verbose=1, \n save_best_only=True, \n save_weights_only=True,\n mode='auto', \n period=25)\n csvlog = CSVLogger('../models/imagenet_models/' + model_dir + '/' + save_name+ '_log.csv')#, append=True\n if args.weight != '':\n model.load_weights(args.weight, by_name=True)\n model.fit_generator(train_generator,\n steps_per_epoch = int(X_train.shape[0] / (batch_size * gpus_num)),\n epochs = nb_epoch,\n validation_data = val_generator,\n validation_steps = int(X_test.shape[0] / (batch_size * gpus_num)),\n callbacks = [checkpointer, csvlog])\n if args.model == \"GoogLeNetSPP\":\n model.save_weights('../models/imagenet_models/' + model_dir + '/' + 
save_name+ '_final_model.h5')\n elif args.model == \"GoogLeNet\":\n model.save_weights('../models/imagenet_models/' + model_dir + '/' + save_name+ '_final_model.h5')\n elif args.model == \"OEDCGoogLeNetSPP\":\n model.save_weights('../models/imagenet_models/' + model_dir + '/' + save_name+ '_final_model.h5')\n elif args.model == \"OEDCGoogLeNetSPP_lowerBody\":\n model.save_weights('../models/imagenet_models/' + model_dir + '/' + save_name+ '_train_final_model.h5')\n #model_pred.compile(loss=\"categorical_crossentropy\", optimizer='adam', metrics=['accuracy'])\n #model_pred.load_weights('../models/imagenet_models/' + model_dir + '/train_final_model.h5', by_name=True)\n #model_pred.save('../models/imagenet_models/' + model_dir + '/final_model.h5')\n",
"\"\"\"\npython extrac_feature.py -g 0,1 -c 61 -w ../models/xxxxx.hdf5 -m GoogLeNetSPP\npython extrac_feature.py -g 0,1 -c 68 -w ../models/xxxxx.hdf5 -m OEDCGoogLeNetSPP\n\"\"\"\nfrom keras import Model, Sequential\nfrom GoogLenetSPP import GoogLeNetSPP\nfrom OEDC_GoogLenetSPP import OEDCGoogLeNetSPP\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nfrom keras.utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping, TensorBoard, CSVLogger\nimport sys\nimport os\nimport argparse\nimport json\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom keras import backend as K\n\ndef parse_arg():\n models = ['GooLeNetSPP', 'OEDCGoogLeNetSPP']\n parser = argparse.ArgumentParser(description='training of the WPAL...')\n parser.add_argument('-g', '--gpus', type=str, default='',\n help='The gpu device\\'s ID need to be used')\n parser.add_argument('-c', '--classes', type=int, default=65,\n help='The total number of classes to be predicted')\n parser.add_argument('-wd', '--width', type=int, default=160,\n help='The width of thWPAL_PETAe picture')\n parser.add_argument('-hg', '--height', type=int, default=75,\n help='The height of the picture')\n parser.add_argument('-w', '--weight', type=str, default='',\n help='The weights file of the pre-training')\n parser.add_argument('-m', '--model', type=str, default='',\n help='The model including: '+str(models))\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus\n return args\n\ndef weighted_categorical_crossentropy(y_true, y_pred):\n total_num = 19000\n f = open('../results/PETA_ratio_positive_samples_for_attributes.json',\"r\")\n for line in f:\n ratio = json.loads(line)\n f.close()\n ratio_array = np.array(list(ratio.values())) * 1.0 / total_num\n #print(data)\n #print(K.int_shape(y_pred))(None, 65)\n loss = K.zeros_like(K.categorical_crossentropy(y_true, y_pred))\n for i in range(K.int_shape(y_pred)[1]):\n loss += 0.5 * (y_true[:, i] * K.log(y_pred[:, i]) / ratio_array[i] + y_pred[:, i] * K.log(y_true[:, i]) / (1 - ratio_array[i]))\n return loss\n \n\n\nif __name__ == \"__main__\":\n args = parse_arg()\n class_num = args.classes\n #googleNet默认输入32*32的图片\n if args.model == \"GoogLeNetSPP\":\n model = GoogLeNetSPP.build(None, None, 3, class_num)\n elif args.model == \"OEDCGoogLeNetSPP\":\n model = OEDCGoogLeNetSPP.build(None, None, 3, 7, [3, 7, 11, 6, 7, 12, 15])#[4, 7, 11, 7, 7, 13, 16]\n model = Model(inputs=model.input,\n outputs=model.get_layer('concatenate_10').output)\n gpus_num = len(args.gpus.split(','))\n if gpus_num != 1:\n model = multi_gpu_model(model, gpus=gpus_num)\n model.summary()\n\n\n \n image_width = args.width\n image_height = args.height\n if args.model == \"GoogLeNetSPP\":\n filename = r\"../results/PETA_labels_pd.csv\"\n elif args.model == \"OEDCGoogLeNetSPP\":\n filename = r\"../results/PETA_coarse_to_fine_labels_pd.csv\"\n data = np.array(pd.read_csv(filename))[:, 1:]\n length = len(data)\n data_x = np.zeros((length, image_width, image_height, 3))\n data_y = np.zeros((length, class_num))\n for i in range(length):\n #img = image.load_img(path + m)\n img = image.load_img(data[i, 0], target_size=(image_width, image_height, 3))\n data_x[i] = image.img_to_array(img)\n data_y[i] = np.array(data[i, 1:1+class_num], dtype=\"float32\")\n print(\"The shape of the X_train is: \", data_x.shape)\n print(\"The shape of the y_train is: \", 
data_y.shape)\n\n\n if args.model == \"GoogLeNetSPP\":\n model_dir = 'WPAL_PETA'\n elif args.model == \"OEDCGoogLeNetSPP\":\n model_dir = 'OEDCWPAL_PETA'\n if args.weight != '':\n model.load_weights(args.weight)\n\n features_all = model.predict(data_x)\n labels_all = data_y\n data_all = {'features_all':features_all, \n 'labels_all':data_y}\n savename = \"../results/\" + model_dir + '_features_all.pickle'\n fsave = open(savename, 'wb')\n pickle.dump(data_all, fsave)\n fsave.close()"
] | [
[
"numpy.load"
],
[
"pandas.read_csv",
"numpy.array",
"numpy.zeros",
"sklearn.model_selection.train_test_split"
],
[
"numpy.array",
"numpy.zeros",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
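The hiarBayesGoogLeNet record above builds its "high" output as a product of a feature-based prediction and a conditional term learned from the lower-level outputs. A minimal sketch of that head in isolation (feature width and the class counts n_low/n_mid/n_high are illustrative, and tensorflow.keras stands in for the record's standalone keras):

```python
# Sketch of the conditional "high" head pattern from hiarBayesGoogLeNet.build.
from tensorflow.keras import layers, Model

n_low, n_mid, n_high = 10, 20, 30  # hypothetical class counts per level

feat = layers.Input(shape=(1024,), name="pooled_features")
pred_low = layers.Dense(n_low, activation="sigmoid", name="low")(feat)
pred_mid = layers.Dense(n_mid, activation="sigmoid", name="mid")(feat)
pred_high = layers.Dense(n_high, activation="sigmoid", name="high_fea")(feat)

# conditional term estimated from the lower-level predictions
priori = layers.concatenate([pred_low, pred_mid], axis=1)
high_cond = layers.Dense(n_high, activation="sigmoid", name="high_cond")(priori)

# posterior as an elementwise product, mirroring Lambda(lambda x: x[1] * x[0])
high_post = layers.multiply([high_cond, pred_high], name="high")

model = Model(feat, [pred_low, pred_mid, high_post])
model.compile(loss="binary_crossentropy", optimizer="adam")
```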
rabaneda/S1chain | [
"b2c0c2efc6b8b09c92f66d5e10074f3c1df04e03"
] | [
"source/winddir.py"
] | [
"\"\"\"\nCreated on Wed May 20 15:52:47 2020\n\n@author: Alberto\n\"\"\"\n\nimport numpy as np\nimport scipy.stats as st\nfrom nc_methods import NetCDFManager\nimport warnings\n\n#------------------------------------------------------------------------------\n\nclass WIND(NetCDFManager):\n \n kernels = {'op_sobel':np.array([[3,0,3], [10,0,-10], [3,0,-3]])*(1/32.)}\n ampli = 'Amplitude_VV'\n sigma = 'Sigma0_VV'\n incidence = 'incidenceAngleFromEllipsoid' # in degrees\n \n wspd_name = 'Wind speed'\n wspd_attr = {'Long_name': 'Neutral wind speed at 10 metres over the still water level', \n 'Standard_name':'Neutral wind speed at 10 m',\n 'units':'m/s',\n 'resolution':'1 km',\n 'scale_factor':10}\n \n sigma_calc_name = 'NRCS calc'\n sigma_calc_attr = {'Long_name': 'Calculated Normalised Radar Cross Section', \n 'Standard_name':'Sigma nought calculated',\n 'units':'m/s',\n 'resolution':'100 m',\n 'scale_factor':1}\n \n sigma_obs_name = 'NRCS obs'\n sigma_obs_attr = {'Long_name': 'Observed Normalised Radar Cross Section', \n 'Standard_name':'Sigma nought observed',\n 'units':'m/s',\n 'resolution':'100 m',\n 'scale_factor':1}\n \n wdir_name = 'Wind direction'\n wdir_attr = {'Long_name': 'Wind direction with 180 degrees of ambiguity', \n 'Standard_name':'Wind direction',\n 'units':'degrees',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n R_name = 'Alignment'\n R_attr = {'Long_name': 'Mean Resultant Length', \n 'Standard_name':'Alignment',\n 'units':'none',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n ME_name = 'Marginal error'\n ME_attr = {'Long_name': 'Marginal Error of the Mean Resultant Vector', \n 'Standard_name':'Marginal Error',\n 'units':'degrees',\n 'resolution':'4 km',\n 'scale_factor':40}\n \n \n def get_phase_matrix(self, amplitude_matrix):\n \n real = self.convolution_fourier(amplitude_matrix, WIND.kernels['op_sobel'])\n img = self.convolution_fourier(amplitude_matrix, np.transpose(WIND.kernels['op_sobel']))\n lg = real + (img*1j)\n phases = np.angle(lg)\n return phases\n \n def get_direction(self, arr, confidence):\n '''Returns mean direction, mean resultant vector and marginal error'''\n '''array must be contain axial directional data'''\n \n array = arr.flatten()\n angle = np.arctan2(np.mean(np.sin(array)), np.mean(np.cos(array)))*0.5\n R = np.power((np.mean(np.cos(array))**2)+(np.mean(np.sin(array))**2), 0.5)\n print (array.shape)\n print (np.mean(array)*confidence,np.mean(array)*(1-confidence))\n med = st.scoreatpercentile(array, 50, limit=(np.mean(array)*confidence,np.mean(array)*(1-confidence)))\n alpha = np.mean(np.cos(4*(array-angle)))\n ME = 0.5*(np.arcsin(med*np.power((1-alpha)/(2*len(array)*(R**2)), 0.5)))\n return (np.degrees(angle), R, ME)\n \n def get_direction_matrix(self, confidence=0.05, threshold=15, progressive_multilook=False):\n '''Returns mean direction array, mean resultant vector array and marginal error array'''\n ''' confidence: int, 0 to 1. Percintile to remove from its freq. distribution\n Default 0.05, which means it will remove values within distribution\n from 0 to 0.05 anf from 0.95 to 1.\n thershold: int, in degrees. 
Maximum marginal error in degrees to accept.\n Default is 15 degrees.\n progressive multilook: Boolean, default is False, each imagette is\n independent of the others, pixels belong to only one imagette.\n If True, pixels will belong to multiple imagettes at the same time\n since imagettes will overlap because an imagette is created for each \n pixel independently of multilook value; N of pixels = N of imagettes.\n Each pixel will belong to multiple imagettes, but there will be\n one imagette where this pixel will be the centre of the imagette'''\n \n axial = 2*self.get_phase_matrix(self.get_var_array(self.ds, WIND.ampli))\n angle_matrix = np.zeros(shape=axial.shape)\n R_matrix = np.zeros(shape=axial.shape)\n ME_matrix = np.zeros(shape=axial.shape)\n \n subimages = list(self.gen_imagettes(axial, multilook=WIND.wdir_attr['scale_factor'], progressive_multilook=progressive_multilook))\n for roi in subimages:\n angle, R, ME = self.get_direction(roi[0], confidence=confidence)\n if ME > threshold:\n angle = np.nan\n \n if len(roi[1]) == 2:\n angle_matrix[roi[1][0],roi[1][1]] = angle\n R_matrix[roi[1][0], roi[1][1]] = R\n ME_matrix[roi[1][0], roi[1][1]] = ME\n elif len(roi[1]) == 4:\n angle_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = angle\n R_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = R\n ME_matrix[roi[1][0]:roi[1][1], roi[1][2]:roi[1][3]] = ME\n \n \n self.add_var(self.out, WIND.wdir_name, angle_matrix, WIND.wdir_attr)\n if self.inter == True:\n self.add_var(self.out, WIND.R_name, R_matrix, WIND.R_attr)\n self.add_var(self.out, WIND.ME_name, ME_matrix, WIND.ME_attr)\n\n \n def cmod5n_forward(self,v,phi,theta):\n '''! ---------\n ! cmod5n_forward(v, phi, theta)\n ! inputs:\n ! v in [m/s] wind velocity (always >= 0)\n ! phi in [deg] angle between azimuth and wind direction\n ! (= D - AZM)\n ! theta in [deg] incidence angle\n ! output:\n ! CMOD5_N NORMALIZED BACKSCATTER (LINEAR)\n !\n ! All inputs must be Numpy arrays of equal sizes\n !\n ! A. STOFFELEN MAY 1991 ECMWF CMOD4\n ! A. STOFFELEN, S. DE HAAN DEC 2001 KNMI CMOD5 PROTOTYPE\n ! H. HERSBACH JUNE 2002 ECMWF COMPLETE REVISION\n ! J. de Kloe JULI 2003 KNMI, rewritten in fortran90\n ! A. Verhoef JAN 2008 KNMI, CMOD5 for neutral winds\n ! K.F.Dagestad OCT 2011 NERSC, Vectorized Python version\n !---------------------------------------------------------------------\n '''\n # Ignore overflow errors for wind calculations over land\n warnings.simplefilter(\"ignore\", RuntimeWarning) \n \n DTOR = 57.29577951\n THETM = 40.\n THETHR = 25.\n ZPOW = 1.6\n \n # NB: 0 added as first element below, to avoid switching from 1-indexing to 0-indexing\n C = [0, -0.6878, -0.7957, 0.3380, -0.1728, 0.0000, 0.0040, 0.1103, 0.0159, \n 6.7329, 2.7713, -2.2885, 0.4971, -0.7250, 0.0450, \n 0.0066, 0.3222, 0.0120, 22.7000, 2.0813, 3.0000, 8.3659,\n -3.3428, 1.3236, 6.2437, 2.3893, 0.3249, 4.1590, 1.6930]\n Y0 = C[19]\n PN = C[20]\n A = C[19]-(C[19]-1)/C[20]\n \n B = 1./(C[20]*(C[19]-1.)**(3-1))\n \n # ! ANGLES\n FI=phi/DTOR\n CSFI = np.cos(FI)\n CS2FI= 2.00 * CSFI * CSFI - 1.00\n \n X = (theta - THETM) / THETHR\n XX = X*X\n \n # ! B0: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n A0 =C[ 1]+C[ 2]*X+C[ 3]*XX+C[ 4]*X*XX\n A1 =C[ 5]+C[ 6]*X\n A2 =C[ 7]+C[ 8]*X\n \n GAM=C[ 9]+C[10]*X+C[11]*XX\n S0 =C[12]+C[13]*X\n \n # V is missing! 
Using V=v as substitute, this is apparently correct\n V=v\n S = A2*V\n S_vec = S.copy() \n SlS0 = S_vec<S0\n S_vec[SlS0]=S0[SlS0]\n A3=1./(1.+np.exp(-S_vec))\n SlS0 = (S<S0)\n A3[SlS0]=A3[SlS0]*(S[SlS0]/S0[SlS0])**( S0[SlS0]*(1.- A3[SlS0]))\n #A3=A3*(S/S0)**( S0*(1.- A3))\n B0=(A3**GAM)*10.**(A0+A1*V)\n \n # ! B1: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n B1 = C[15]*V*(0.5+X-np.tanh(4.*(X+C[16]+C[17]*V)))\n B1 = C[14]*(1.+X)- B1\n B1 = B1/(np.exp( 0.34*(V-C[18]) )+1.)\n \n # ! B2: FUNCTION OF WIND SPEED AND INCIDENCE ANGLE\n V0 = C[21] + C[22]*X + C[23]*XX\n D1 = C[24] + C[25]*X + C[26]*XX\n D2 = C[27] + C[28]*X\n \n V2 = (V/V0+1.)\n V2ltY0 = V2<Y0\n V2[V2ltY0] = A+B*(V2[V2ltY0]-1.)**PN\n B2 = (-D1+D2*V2)*np.exp(-V2)\n \n # ! CMOD5_N: COMBINE THE THREE FOURIER TERMS\n CMOD5_N = B0*(1.0+B1*CSFI+B2*CS2FI)**ZPOW\n return CMOD5_N\n \n \n def cmod5n_inverse(self, sigma0_obs, phi, incidence, iterations=10):\n '''! ---------\n ! cmod5n_inverse(sigma0_obs, phi, incidence, iterations)\n ! inputs:\n ! sigma0_obs Normalized Radar Cross Section [linear units]\n ! phi in [deg] angle between azimuth and wind direction\n ! (= D - AZM)\n ! incidence in [deg] incidence angle\n ! iterations: number of iterations to run\n ! output:\n ! Wind speed, 10 m, neutral stratification \n !\n ! All inputs must be Numpy arrays of equal sizes\n !\n ! This function iterates the forward CMOD5N function\n ! until agreement with input (observed) sigma0 values \n !---------------------------------------------------------------------\n '''\n # Ignore overflow errors for wind calculations over land\n warnings.simplefilter(\"ignore\", RuntimeWarning) \n # First guess wind speed\n V = np.array([10.])*np.ones(sigma0_obs.shape);\n step=10.\n \n # Iterating until error is smaller than threshold\n for iterno in range(1, iterations):\n #print iterno\n sigma0_calc = self.cmod5n_forward(V, phi, incidence)\n ind = sigma0_calc-sigma0_obs>0\n V = V + step\n V[ind] = V[ind] - 2*step \n step = step/2\n \n #mdict={'s0obs':sigma0_obs,'s0calc':sigma0_calc}\n #from scipy.io import savemat\n #savemat('s0test',mdict)\n \n if self.inter == False:\n return (V)\n elif self.inter == True:\n return (V, sigma0_obs, sigma0_calc)\n \n def get_speed_matrix(self):\n \n azimuth = float(self.ds.attrs['azimuth_direction'])\n if self.inter == False:\n speed = self.cmod5n_inverse(self.downsampling_2D(self.get_var_array(self.ds, WIND.sigma), multilook=WIND.wspd_attr['scale_factor']), \n self.downsampling_2D(self.get_var_array(self.out, WIND.wdir_name), multilook=WIND.wspd_attr['scale_factor'])-azimuth, \n self.downsampling_2D(self.get_var_array(self.ds, WIND.incidence), multilook=WIND.wspd_attr['scale_factor']))\n self.add_var(self.out, WIND.wspd_name, speed, WIND.wspd_attr)\n elif self.inter == True:\n speed, sigma0_obs, sigma0_calc = self.cmod5n_inverse(self.downsampling_2D(self.get_var_array(self.ds, WIND.sigma), multilook=WIND.wspd_attr['scale_factor']), \n self.downsampling_2D(self.get_var_array(self.out, WIND.wdir_name), multilook=WIND.wspd_attr['scale_factor'])-azimuth, \n self.downsampling_2D(self.get_var_array(self.ds, WIND.incidence), multilook=WIND.wspd_attr['scale_factor']))\n self.add_var(self.out, WIND.wspd_name, speed, WIND.wspd_attr)\n self.add_var(self.out, WIND.sigma_obs_name, sigma0_obs, WIND.sigma_obs_attr)\n self.add_var(self.out, WIND.sigma_calc_name, sigma0_calc, WIND.sigma_calc_attr)\n"
] | [
[
"numpy.degrees",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.tanh",
"numpy.mean",
"numpy.transpose",
"numpy.exp",
"numpy.angle",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
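The core of WIND.cmod5n_inverse above is a vectorised step-halving search: raise the wind-speed estimate everywhere, pull it back down where the forward model overshoots the observation, and halve the step each iteration. A standalone sketch of that loop (forward stands in for cmod5n_forward; array shapes are illustrative):

```python
# Step-halving inversion loop as used in WIND.cmod5n_inverse.
import numpy as np

def invert(forward, sigma0_obs, phi, theta, iterations=10):
    v = np.full(sigma0_obs.shape, 10.0)  # first-guess wind speed [m/s]
    step = 10.0
    for _ in range(1, iterations):
        sigma0_calc = forward(v, phi, theta)
        too_bright = sigma0_calc - sigma0_obs > 0  # forward model overestimates here
        v = v + step
        v[too_bright] -= 2 * step  # net effect: -step where overestimated
        step = step / 2
    return v
```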
jsandersen/CMT | [
"1be6e36b9a6042386395bc654c9dd4b579e6ce6d",
"1be6e36b9a6042386395bc654c9dd4b579e6ce6d"
] | [
"training/Toxic_CNN2_MCD.py",
"training/IMDB_CNN2_BBB.py"
] | [
"import tensorflow as tf\ntf.compat.v1.disable_v2_behavior()\n\nfrom src.datasets.toxic import Toxic\nfrom src.datasets.util import splits\nfrom src.models.cnn2 import getCNN2\nfrom src.models.embedding import * \nfrom src.models.predict import predict_mcdropout\nimport yaml\n\nimport pandas as pd\nimport tensorflow as tf\n\nfrom gensim import models\n\n# load conifig\nwith open('config.yaml', 'r') as f:\n conf = yaml.load(f)\nWord2VecModelPath = conf[\"Word2VecModelPath\"]\n\n# config\nRANDOM_STATE = 1\n\nMAX_SEQUENCE_LENGTH = 500\n\nNUM_SPLITS = 5\nSPLIT_SIZE = 10000\n\ndef build():\n\n \n # get data\n print('read data ...')\n \n toxic = Toxic(clean=True)\n X_train, y_train, X_test, y_test, X_eval, y_eval, word_index = toxic.getRawDataSplits(n_splits=5, test_size=SPLIT_SIZE, random_state=1)\n \n print('create embedding ...')\n # embedding\n w = models.KeyedVectors.load_word2vec_format(Word2VecModelPath, binary=True)\n \n embeddings_index, embedding_dim = get_embeddings_index(w)\n \n w = None\n \n # training\n print('train model ...')\n \n models_n = []\n\n for i in range(NUM_SPLITS):\n model = tf.keras.models.load_model(f'models/toxic/CNN2_BL_{i}')\n models_n.append(model)\n\n # predict\n print('evaluate ...')\n dfs = []\n for m in range(NUM_SPLITS):\n dfs_parts = []\n s = 2500\n j = s\n for i in range(0, SPLIT_SIZE, s):\n dfs_n = predict_mcdropout(models_n[m], X_eval[i:j], y_eval[i:j])\n dfs_parts.append(dfs_n)\n print('#', i, j)\n j+=s\n dfs.append(pd.concat([*dfs_parts], ignore_index=True))\n\n # save\n print('save as dataframe ...')\n name = 'CNN2_MCD'\n i = 0\n for df in dfs:\n df.to_pickle(f\"pickle/toxic/df_{name}_{i}.pkl\")\n i = i+1",
"import tensorflow as tf\ntf.compat.v1.disable_v2_behavior()\n\nfrom src.datasets.imdb import IMDB\nfrom src.datasets.util import splits\nfrom src.models.cnn2 import getCNN2_BBB\nfrom src.models.embedding import * \nfrom src.models.predict import predict_bbb\nimport yaml\n\nimport pandas as pd\n\nfrom gensim import models\nimport tensorflow as tf\n\n# load config\nwith open('config.yaml', 'r') as f:\n conf = yaml.load(f)\nWord2VecModelPath = conf[\"Word2VecModelPath\"]\n\n\n# config\nRANDOM_STATE = 1\n\nMAX_SEQUENCE_LENGTH = 500\nSPLIT_SIZE = 12500\n\nNUM_SPLITS = 5\n\nBATCH_SIZE= 100\nNUM_EPOCHS = 60\n\ndef build():\n\n # get data\n print('read data ...')\n \n imdb = IMDB()\n X_train, y_train, X_test, y_test, X_eval, y_eval, word_index = imdb.getRawDataSplits(n_splits=NUM_SPLITS, test_size=SPLIT_SIZE, random_state=1)\n \n print('create embedding ...')\n \n # embedding\n w = models.KeyedVectors.load_word2vec_format(Word2VecModelPath, binary=True)\n embeddings_index, embedding_dim = get_embeddings_index(w)\n w = None\n \n # training\n print('train model ...')\n \n models_n = []\n\n\n for i in range(NUM_SPLITS):\n embedding_layer = get_embedding_layer(word_index, embeddings_index, embedding_dim)\n model = getCNN2_BBB(MAX_SEQUENCE_LENGTH, 2, embedding_layer, 25000)\n model.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95),\n metrics=['accuracy'])\n history = model.fit(X_train[i], y_train[i],\n batch_size=BATCH_SIZE,\n epochs=NUM_EPOCHS,\n validation_data=(X_test[i], y_test[i])\n )\n models_n.append(model)\n\n # predict\n print('evaluate ...')\n dfs = []\n for m in range(NUM_SPLITS):\n dfs_parts = []\n s = 2500\n j = s\n for i in range(0, 12500, s):\n dfs_n = predict_bbb(models_n[m], X_eval[i:j], y_eval[i:j])\n dfs_parts.append(dfs_n)\n print('#', i, j)\n j+=s\n dfs.append(pd.concat([*dfs_parts], ignore_index=True))\n\n # save\n print('save as dataframe ...')\n name = 'CNN2_BBB'\n i = 0\n for df in dfs:\n df.to_pickle(f\"pickle/imdb/{name}_{i}.pkl\")\n i = i+1"
] | [
[
"tensorflow.keras.models.load_model",
"pandas.concat",
"tensorflow.compat.v1.disable_v2_behavior"
],
[
"pandas.concat",
"tensorflow.keras.optimizers.Adadelta",
"tensorflow.compat.v1.disable_v2_behavior"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
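Both scripts above delegate the uncertainty estimation to src.models.predict helpers that are not part of this record. A common way to realise the MC-dropout pass that predict_mcdropout presumably performs is sketched below (this assumes plain TF2 eager mode, i.e. without the disable_v2_behavior() call the scripts use; passes and the aggregation are illustrative choices, not the repo's exact code):

```python
# Usual MC-dropout recipe: T stochastic forward passes with dropout kept
# active at inference time, then aggregate the passes.
import numpy as np

def mc_dropout_predict(model, x, passes=20):
    # training=True keeps dropout layers stochastic during prediction
    preds = np.stack([model(x, training=True).numpy() for _ in range(passes)])
    # predictive mean and per-class spread across the stochastic passes
    return preds.mean(axis=0), preds.std(axis=0)
```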
arti1117/python-machine-learning-pandas-data-analytics | [
"132b0f3326aeb028348bc9e07d38d18e4ec2e18e"
] | [
"PART04/23_matplotlib_pie.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 9 19:22:34 2020\n\n@author: arti\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.style.use('default')\n\ndf = pd.read_csv('./auto-mpg.csv', header=None)\n\ndf.columns = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',\n 'acceleration', 'model year', 'origin', 'name']\n\ndf['count'] = 1\ndf_origin = df.groupby('origin').sum()\nprint(df_origin.head())\n\ndf_origin.index = ['USA', 'EU', 'JPN']\n\ndf_origin['count'].plot(kind='pie',\n figsize=(7, 5),\n autopct='%1.1f%%',\n startangle=10,\n colors=['chocolate', 'bisque', 'cadetblue']\n )\n\nplt.title('Model Origin', size=20)\nplt.axis('equal')\nplt.legend(labels=df_origin.index, loc='upper right')\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
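A minor variant of the pie chart in the record above: value_counts() replaces the helper 'count' column and groupby().sum() (a sketch of an alternative, not the notebook's original approach):

```python
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('./auto-mpg.csv', header=None)
df.columns = ['mpg', 'cylinders', 'displacement', 'horsepower', 'weight',
              'acceleration', 'model year', 'origin', 'name']

counts = df['origin'].value_counts().sort_index()  # origin codes 1, 2, 3
counts.index = ['USA', 'EU', 'JPN']
counts.plot(kind='pie', figsize=(7, 5), autopct='%1.1f%%', startangle=10)
plt.title('Model Origin', size=20)
plt.axis('equal')
plt.show()
```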
andypbarrett/nsidc-seaice | [
"167a16309f7eaadd5c613b54a7df26eb1f48c2f3"
] | [
"seaice/data/test/test_regression/test_api.py"
] | [
"from unittest.mock import patch\nimport datetime as dt\nimport os\nimport unittest\n\nfrom nose.tools import assert_equals, assert_true, assert_false, assert_raises\nimport numpy as np\nimport numpy.testing as npt\nimport pandas as pd\n\nimport seaice.data as sid\nimport seaice.data.api as api\nimport seaice.data.errors as e\nimport seaice.data.getter as getter\nimport seaice.data.gridset_filters as gf\nimport seaice.nasateam as nt\n\n\nTEST_ROOT = [os.path.join(\n os.path.dirname(__file__),\n os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,\n 'test_data',\n 'seaice.data'\n)]\n\n\nclass Test_concentration_daily(unittest.TestCase):\n\n def test_concentration_daily(self):\n result = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_missing_day_returns_empty_grid(self):\n result = sid.concentration_daily(hemisphere=nt.NORTH, year=2002,\n month=1, day=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_missing_day_raises_when_asked_to(self):\n assert_raises(e.SeaIceDataNoData, sid.concentration_daily,\n hemisphere=nt.NORTH, year=2002,\n month=1, day=1, search_paths=TEST_ROOT,\n allow_empty_gridset=False)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.locator.daily_file_path')\n def test_with_bad_date_and_empty_gridset_not_allowed(self,\n mock_daily_file_path,\n mock_get_bad_days_for_hemisphere,\n mock__concentration_gridset_by_filelist):\n files = ['doesnt_matter1.bin',\n 'doesnt_matter2.bin'\n 'doesnt_matter3.bin']\n mock_daily_file_path.return_value = files\n shape = (5, 5, 2)\n missing = 255\n mock__concentration_gridset_by_filelist.return_value = {\n 'data': np.full(shape, missing, dtype=np.int),\n 'metadata': {\n 'period_index': pd.period_range('1980-10-21', '1980-10-23', freq='D'),\n 'missing_value': 255,\n 'files': files\n }\n }\n\n bad_dates = pd.period_range('1980-10-20', '1980-10-27', freq='D')\n mock_get_bad_days_for_hemisphere.return_value = bad_dates\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n 1980, 10, 25,\n ['/who/cares'],\n interpolation_radius=0,\n allow_empty_gridset=False,\n allow_bad_dates=False)\n\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.gridset_filters._interpolate_missing')\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n def test_daily_multiple_files_interpolated(self, mock_daily_file_path,\n _mockgridset_by_filelist, mock__interpolate_missing,\n mock_get_bad_days_for_hemisphere):\n mock_get_bad_days_for_hemisphere.return_value = []\n\n files = ['nt_20150831_n07_v1.1_s.bin',\n 'nt_20150901_n07_v1.1_s.bin',\n 'nt_20150902_n07_v1.1_s.bin']\n gridset = {'data': np.full((2, 2, 3), 2, dtype=np.int),\n 'metadata': {'files': files,\n 'period_index': pd.period_range(start='2015-08-31',\n end='2015-09-02',\n freq='D')}}\n\n mock_daily_file_path.return_value = files\n _mockgridset_by_filelist.return_value = gridset\n\n interpolated = np.full((2, 2), 2, dtype=np.int)\n mock__interpolate_missing.return_value = interpolated\n\n hemisphere = 
nt.NORTH\n search_paths = ['/anyroot']\n # act\n sid.concentration_daily(hemisphere, 2015, 9, 1, search_paths, interpolation_radius=1)\n\n # assert\n getter._concentration_gridset_by_filelist.assert_called_with(files)\n\n npt.assert_array_equal(mock__interpolate_missing.call_args[0][0], interpolated)\n npt.assert_array_equal(mock__interpolate_missing.call_args[0][1],\n np.full((2, 2, 2), 2, dtype=np.int))\n\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.gridset_filters._interpolate_missing')\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n def test_no_interpolation_needed_only_includes_file_for_date(self,\n mock_daily_file_path,\n mock__gridset_by_filelist,\n mock__interpolate_missing,\n mock_get_bad_days_for_hemisphere):\n mock_get_bad_days_for_hemisphere.return_value = []\n\n files = ['nt_20112131_n07_v1.1_s.bin',\n 'nt_20120101_n07_v1.1_s.bin',\n 'nt_20120102_n07_v1.1_s.bin']\n gridset = {'data': np.full((2, 2, 3), 4, dtype=np.int),\n 'metadata': {'files': files,\n 'period_index': pd.period_range(start='2011-12-31',\n periods=3,\n freq='D')}}\n\n mock_daily_file_path.return_value = files\n mock__gridset_by_filelist.return_value = gridset\n\n mock__interpolate_missing.return_value = np.full((2, 2), 4, dtype=np.int)\n\n interpolation_radius = 1\n\n nt_hemi = nt.NORTH\n actual_gridset = sid.concentration_daily(nt_hemi,\n 2012,\n 1,\n 1,\n ['/anypaths'],\n interpolation_radius=interpolation_radius)\n actual = actual_gridset['metadata']['files']\n\n expected = ['nt_20120101_n07_v1.1_s.bin']\n\n self.assertEqual(actual, expected)\n\n\nclass Test_concentration_daily_average_over_date_range(unittest.TestCase):\n def test_concentration_daily_average_over_date_range(self):\n date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])\n result = sid.concentration_daily_average_over_date_range('N',\n date_range,\n search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n def test_different_from_each_day(self):\n date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])\n first = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=6, search_paths=TEST_ROOT)\n last = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n average = sid.concentration_daily_average_over_date_range('N',\n date_range,\n search_paths=TEST_ROOT)\n\n self.assertFalse(np.all(average['data'] == first['data']))\n self.assertFalse(np.all(average['data'] == last['data']))\n\n\nclass Test_concentration_daily___failed_qa_logic(unittest.TestCase):\n\n def setUp(self):\n self.day_before_grid = np.full(nt.NORTH['shape'], 1, dtype=np.int)\n\n target_grid = np.full(nt.NORTH['shape'], 2, dtype=np.int)\n target_grid[0:3, 0:3] = nt.FLAGS['missing']\n self.target_grid = target_grid.copy()\n\n self.day_after_grid = np.full(nt.NORTH['shape'], 11, dtype=np.int)\n\n self.cube = np.dstack([self.day_before_grid, target_grid, self.day_after_grid])\n\n target_grid[0:3, 0:3] = (1 + 11) / 2\n self.interpolated_grid = target_grid.copy()\n\n self.empty_grid = np.full(nt.NORTH['shape'], nt.FLAGS['missing'], dtype=np.int)\n\n self.target_date = dt.date(1980, 10, 25)\n\n self.period_index = pd.period_range(start='1980-10-24', end='1980-10-26', freq='D')\n\n self.file_list = ['nt_19801024_n07_v1.1_n.bin',\n 'nt_19801025_n07_v1.1_n.bin',\n 
'nt_19801026_n07_v1.1_n.bin']\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_good_day_interpolates_with_good_days_with_allow_bad_dates_false_and_empty_false(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = []\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.interpolated_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_good_day_doesnt_interpolate_with_bad_days(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.target_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list[1:2]\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_when_interpolation_attempt_with_all_bad_days_and_disallowing_bad(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date, 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': 
self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_bad_day_interpolates_with_good_days_despite_disallowing_bad(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n expected_grid = np.full(nt.NORTH['shape'], 6, dtype=np.int)\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = [self.file_list[0], self.file_list[2]]\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_no_data_to_interpolate(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = True\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = []\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.empty_grid,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_bad_data(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_empty_gridset = False\n allow_bad_dates = False\n interpolation_radius = 0\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list[1:2]\n period_index = self.period_index[1:2]\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.target_grid,\n 'metadata': {'files': file_list,\n 
'missing_value': nt.FLAGS['missing'],\n 'period_index': period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_raises_exception_with_no_data(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path):\n allow_empty_gridset = False\n allow_bad_dates = True\n interpolation_radius = 0\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = []\n mock_daily_file_path.return_value = file_list\n\n with self.assertRaises(e.SeaIceDataNoData):\n sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_empty_gridset=allow_empty_gridset,\n allow_bad_dates=allow_bad_dates)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_returns_interpolated_bad_data_gridset(self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = True\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n interpolation_radius=interpolation_radius,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.interpolated_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = self.file_list\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.data.locator.daily_file_path')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_returns_empty_grid_when_all_bad_and_disallowed_bad_but_empty_allowed(\n self,\n mock_get_bad_days_for_hemisphere,\n mock_daily_file_path,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = False\n interpolation_radius = 1\n mock_get_bad_days_for_hemisphere.return_value = [\n pd.Period(self.target_date - dt.timedelta(1), 'D'),\n pd.Period(self.target_date, 'D'),\n pd.Period(self.target_date + dt.timedelta(1), 'D')\n ]\n\n file_list = self.file_list\n mock_daily_file_path.return_value = file_list\n\n gridset = {'data': self.cube,\n 'metadata': {'files': file_list,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n self.target_date.year,\n self.target_date.month,\n self.target_date.day,\n ['/who/cares'],\n 
interpolation_radius=interpolation_radius,\n allow_bad_dates=allow_bad_dates)\n expected_grid = self.empty_grid\n npt.assert_array_equal(actual['data'], expected_grid)\n\n expected_files = []\n self.assertEqual(actual['metadata']['files'], expected_files)\n\n @patch('seaice.data.getter._concentration_gridset_by_filelist')\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n @patch('seaice.data.locator.daily_file_path')\n def test_with_bad_date_and_empty_gridset_allowed(self,\n mock_daily_file_path,\n mock_get_bad_days_for_hemisphere,\n mock__concentration_gridset_by_filelist):\n allow_bad_dates = False\n files = ['files.1_s.bin']\n mock_daily_file_path.return_value = files\n\n bad_dates = pd.period_range('1980-10-20', '1980-10-27', freq='D')\n mock_get_bad_days_for_hemisphere.return_value = bad_dates\n\n gridset = {'data': self.target_grid,\n 'metadata': {'files': files,\n 'missing_value': nt.FLAGS['missing'],\n 'period_index': self.period_index,\n 'valid_data_range': (0, 100)}}\n\n mock__concentration_gridset_by_filelist.return_value = gridset\n\n actual = sid.concentration_daily(nt.NORTH,\n 1980,\n 10,\n 25,\n ['/who/cares'],\n interpolation_radius=0,\n allow_bad_dates=allow_bad_dates)\n expected = np.full((448, 304), 255, dtype=np.int)\n\n npt.assert_array_equal(actual['data'], expected)\n\n\nclass Test_extent_daily(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_daily(hemisphere=nt.NORTH, year=2001,\n month=1, day=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n\n\nclass Test_extent_daily_median(unittest.TestCase):\n @patch('seaice.datastore.get_bad_days_for_hemisphere')\n def test_calls_ok(self, mock_get_bad_days):\n mock_get_bad_days.return_value = []\n result = sid.extent_daily_median(hemisphere=nt.NORTH, start_year=2001, end_year=2002,\n dayofyear=7, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test_concentration_monthly(unittest.TestCase):\n\n def test_concentration_monthly_with_insufficent_daily_files(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2001,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(result['metadata']['empty_gridset'], True)\n assert_equals(expected, actual)\n\n def test_concentration_monthly_with_sufficient_daily_files(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2001,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True,\n min_days_for_valid_month=2)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_false(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n assert_equals(result['metadata'].get('empty_gridset', False), False)\n\n def test_missing_month_returns_empty_grid(self):\n result = sid.concentration_monthly(hemisphere=nt.NORTH, year=2002,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=True)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_true(np.all(result['data'] == 255.))\n assert_equals(expected, actual)\n assert_equals(result['metadata']['empty_gridset'], True)\n\n def 
test_missing_month_raises_when_asked_to(self):\n assert_raises(e.SeaIceDataNoData, sid.concentration_monthly,\n hemisphere=nt.NORTH, year=2002,\n month=1, search_paths=TEST_ROOT,\n allow_empty_gridset=False)\n\n\nclass Test_extent_monthly(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_monthly(hemisphere=nt.NORTH, year=2001, month=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test_extent_monthly_median(unittest.TestCase):\n\n def test_calls_ok(self):\n result = sid.extent_monthly_median(hemisphere=nt.NORTH, start_year=2001, end_year=2002,\n month=1, search_paths=TEST_ROOT)\n actual = result['data'].shape\n rows, cols = nt.NORTH['shape']\n expected = (rows, cols)\n assert_equals(expected, actual)\n\n\nclass Test__filters(unittest.TestCase):\n def test_no_params(self):\n actual = api._filters()\n expected = []\n self.assertEqual(actual, expected)\n\n @patch('seaice.data.api.functools.partial')\n def test_drop_land(self, mock_partial):\n actual = api._filters(drop_land=True)\n expected = [mock_partial.return_value]\n\n mock_partial.assert_called_once_with(gf.drop_land, nt.FLAGS['land'], nt.FLAGS['coast'])\n self.assertEqual(actual, expected)\n\n def test_bad_dates(self):\n actual = api._filters(allow_bad_dates=False)\n expected = [gf.drop_bad_dates]\n self.assertEqual(actual, expected)\n\n def test_interpolate(self):\n actual = api._filters(interpolation_radius=1)\n expected = [gf.interpolate]\n self.assertEqual(actual, expected)\n\n def test_drop_invalid_ice_with_no_other_params(self):\n actual = api._filters(drop_invalid_ice=True)\n expected = []\n self.assertEqual(actual, expected)\n\n @patch('seaice.data.api.functools.partial')\n @patch('seaice.nasateam.invalid_ice_mask')\n def test_drop_invalid_ice_with_normal_params(self,\n mock_invalid_ice_mask,\n mock_partial):\n hemisphere = nt.NORTH\n\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n\n def wrapped_func():\n pass\n mock_partial.return_value = wrapped_func\n\n actual_filters = api._filters(hemisphere=hemisphere,\n month=1,\n drop_invalid_ice=True)\n\n expected_filters = [wrapped_func]\n\n actual_partial_called_with_func = mock_partial.call_args[0][0]\n actual_partial_called_with_mask = mock_partial.call_args[0][1]\n\n mock_invalid_ice_mask.assert_called_once_with(hemisphere, 1)\n self.assertEqual(actual_partial_called_with_func, gf.drop_invalid_ice)\n npt.assert_array_equal(mask, actual_partial_called_with_mask)\n self.assertEqual(actual_filters, expected_filters)\n\n @patch('seaice.data.api.functools.partial')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_drop_invalid_ice_with_median_params(self,\n mock_invalid_ice_mask_for_median,\n mock_partial):\n hemisphere = nt.NORTH\n\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask_for_median.return_value = mask\n\n def wrapped_func():\n pass\n mock_partial.return_value = wrapped_func\n\n actual_filters = api._filters(hemisphere=hemisphere,\n start_year=2001,\n end_year=2005,\n dayofyear=60,\n drop_invalid_ice=True)\n\n expected_filters = [wrapped_func]\n\n actual_partial_called_with_func = mock_partial.call_args[0][0]\n actual_partial_called_with_mask = mock_partial.call_args[0][1]\n\n mock_invalid_ice_mask_for_median.assert_called_once_with(2001, 2005, 60, hemisphere)\n self.assertEqual(actual_partial_called_with_func, gf.drop_invalid_ice)\n npt.assert_array_equal(mask, 
actual_partial_called_with_mask)\n self.assertEqual(actual_filters, expected_filters)\n\n def test_prevent_empty(self):\n actual = api._filters(allow_empty_gridset=False)\n expected = [gf.prevent_empty]\n self.assertEqual(actual, expected)\n\n @patch('seaice.nasateam.invalid_ice_mask')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_order_prevent_empty_is_last(self,\n mock_invalid_ice_mask,\n mock__invalid_ice_mask_for_median):\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n mock__invalid_ice_mask_for_median.return_value = mask\n\n ALLOW_EMPTY_GRIDSET = False\n\n for drop_land in [True, False, None]:\n for allow_bad_dates in [True, False, None]:\n for interpolation_radius in [0, 1]:\n for drop_invalid_ice in [True, False, None]:\n for hemisphere, month, start_year, end_year, dayofyear in [\n (nt.NORTH, 1, None, None, None),\n (nt.NORTH, None, 2000, 2005, 60),\n (None, None, None, None, None)]:\n actual_filters = api._filters(\n hemisphere=hemisphere,\n month=month,\n drop_land=drop_land,\n allow_bad_dates=allow_bad_dates,\n interpolation_radius=interpolation_radius,\n drop_invalid_ice=drop_invalid_ice,\n\n allow_empty_gridset=ALLOW_EMPTY_GRIDSET)\n\n actual_prevent_empty_index = actual_filters.index(gf.prevent_empty)\n expected_prevent_empty_index = len(actual_filters) - 1\n\n self.assertEqual(actual_prevent_empty_index,\n expected_prevent_empty_index)\n\n @patch('seaice.nasateam.invalid_ice_mask')\n @patch('seaice.data.api._invalid_ice_mask_for_median')\n def test_order_drop_bad_dates_before_interpolate(self,\n mock_invalid_ice_mask,\n mock__invalid_ice_mask_for_median):\n mask = np.ones((448, 304), dtype=bool)\n mock_invalid_ice_mask.return_value = mask\n mock__invalid_ice_mask_for_median.return_value = mask\n\n ALLOW_BAD_DATES = False\n INTERPOLATION_RADIUS = 1\n\n for drop_land in [True, False, None]:\n for drop_invalid_ice in [True, False, None]:\n for hemisphere, month, start_year, end_year, dayofyear in [\n (nt.NORTH, 1, None, None, None),\n (nt.NORTH, None, 2000, 2005, 60),\n (None, None, None, None, None)]:\n for allow_empty_gridset in [True, False, None]:\n actual_filters = api._filters(\n hemisphere=hemisphere,\n month=month,\n drop_land=drop_land,\n drop_invalid_ice=drop_invalid_ice,\n allow_empty_gridset=allow_empty_gridset,\n\n allow_bad_dates=ALLOW_BAD_DATES,\n interpolation_radius=INTERPOLATION_RADIUS)\n\n drop_bad_dates_index = actual_filters.index(gf.drop_bad_dates)\n interpolate_index = actual_filters.index(gf.interpolate)\n\n self.assertLess(drop_bad_dates_index, interpolate_index)\n\n\nclass Test__anomaly_gridset(unittest.TestCase):\n pole_hole_value = 251\n\n def _metadata(self):\n flags = {'pole': self.pole_hole_value}\n\n return {\n 'valid_data_range': (0, 100),\n 'files': [],\n 'period_index': pd.PeriodIndex([], freq='M'),\n 'flags': flags\n }\n\n def _climatology_gridset(self, *data):\n return {'data': np.dstack(data), 'metadata': self._metadata()}\n\n def _month_gridset(self, data):\n return {'data': data, 'metadata': self._metadata()}\n\n def test_subtract_average_climatology_from_month(self):\n month_gridset = self._month_gridset(\n np.array([[25, 25],\n [25, 25]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 15],\n [18, 22]]),\n np.array([[19, 25],\n [20, 16]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[25 - 18, 25 - 20],\n [25 - 19, 25 - 19]])\n npt.assert_array_equal(actual['data'], expected)\n\n def 
test_preserves_values_outside_valid_range_from_climatology(self):\n month_gridset = self._month_gridset(\n np.array([[25, 10],\n [25, 10]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 101],\n [17, 101]]),\n np.array([[19, 101],\n [19, 101]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[7, 101],\n [7, 101]])\n npt.assert_array_equal(actual['data'], expected)\n\n def test_preserves_largest_pole_hole_month_gridset(self):\n \"\"\"Tests that the largest pole hole is retained from the\n month_gridset.\n \"\"\"\n month_gridset = self._month_gridset(\n np.array([[self.pole_hole_value, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, 15],\n [self.pole_hole_value, self.pole_hole_value]]),\n np.array([[19, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[self.pole_hole_value, 25 - 20],\n [self.pole_hole_value, self.pole_hole_value]])\n npt.assert_array_equal(actual['data'], expected)\n\n def test_preserves_largest_pole_hole_climatology_gridset(self):\n \"\"\"Tests that the largest pole hole is retained from the\n climatology_gridset.\n \"\"\"\n month_gridset = self._month_gridset(\n np.array([[25, 25],\n [self.pole_hole_value, self.pole_hole_value]]))\n climatology_gridset = self._climatology_gridset(\n np.array([[17, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]]),\n np.array([[19, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]]))\n\n actual = api._anomaly_gridset(month_gridset, climatology_gridset)\n\n expected = np.array([[25 - 18, self.pole_hole_value],\n [self.pole_hole_value, self.pole_hole_value]])\n npt.assert_array_equal(actual['data'], expected)\n"
] | [
[
"pandas.PeriodIndex",
"pandas.period_range",
"numpy.dstack",
"numpy.full",
"numpy.testing.assert_array_equal",
"pandas.DatetimeIndex",
"numpy.ones",
"numpy.all",
"pandas.Period",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shenghh2015/segmentation_models | [
"473c528c724f62ff38ac127747dd8babb7de6b85",
"473c528c724f62ff38ac127747dd8babb7de6b85",
"473c528c724f62ff38ac127747dd8babb7de6b85"
] | [
"translate/train_model.py",
"thesis/train_HeLa_unet.py",
"thesis/segmentation_models_v1/metrics.py"
] | [
"import os\nimport cv2\nfrom skimage import io\nimport sys\n# import keras\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom natsort import natsorted\n# sys.path.append('../')\nimport segmentation_models_v1 as sm\nfrom segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, AtUnet, ResUnet\nsm.set_framework('tf.keras')\n\nfrom helper_function import plot_history_flu2, save_phase_fl_history, plot_flu_prediction, plot_set_prediction\nfrom helper_function import save_history_for_callback, plot_history_for_callback\nfrom helper_function import precision, recall, f1_score, calculate_psnr, calculate_pearsonr\nfrom sklearn.metrics import confusion_matrix\n\ndef str2bool(value):\n return value.lower() == 'true'\n\ndef generate_folder(folder_name):\n\tif not os.path.exists(folder_name):\n\t\tos.system('mkdir -p {}'.format(folder_name))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--gpu\", type=str, default = '2')\nparser.add_argument(\"--docker\", type=str2bool, default = True)\nparser.add_argument(\"--net_type\", type=str, default = 'Unet') #Unet, Linknet, PSPNet, FPN\nparser.add_argument(\"--backbone\", type=str, default = 'efficientnetb0')\nparser.add_argument(\"--dataset\", type=str, default = 'neuron_float')\nparser.add_argument(\"--subset\", type=str, default = 'train')\nparser.add_argument(\"--epoch\", type=int, default = 10)\nparser.add_argument(\"--run\", type=int, default = 1)\nparser.add_argument(\"--dim\", type=int, default = 512)\nparser.add_argument(\"--ch_in\", type=int, default = 3)\nparser.add_argument(\"--ch_out\", type=int, default = 3)\nparser.add_argument(\"--fl_ch\", type=str, default = 'fl12')\nparser.add_argument(\"--rot\", type=float, default = 0)\nparser.add_argument(\"--scale\", type=float, default = 100)\nparser.add_argument(\"--train\", type=int, default = None)\nparser.add_argument(\"--act_fun\", type=str, default = 'relu')\nparser.add_argument(\"--loss\", type=str, default = 'mse')\nparser.add_argument(\"--batch_size\", type=int, default = 6)\nparser.add_argument(\"--lr\", type=float, default = 5e-4)\nparser.add_argument(\"--decay\", type=float, default = 0.8)\nparser.add_argument(\"--delta\", type=float, default = 10)\nparser.add_argument(\"--best_select\", type=str2bool, default = True) ## cancel the selection of best model\nparser.add_argument(\"--pre_train\", type=str2bool, default = True)\nargs = parser.parse_args()\nprint(args)\n\nmodel_name = 'Cor-FL1_FL2-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-subset-{}-loss-{}-act-{}-scale-{}-decay-{}-delta-{}-chi-{}-cho-{}-chf-{}-bselect-{}-run-{}'.format(args.net_type, args.backbone, args.pre_train,\\\n\t\t args.epoch, args.batch_size, args.lr, args.dim, args.train, args.rot, args.dataset, args.subset, args.loss, args.act_fun, args.scale, args.decay, args.delta, args.ch_in, args.ch_out, args.fl_ch, args.best_select, args.run)\nprint(model_name)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\nDATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './datasets/{}'.format(args.dataset)\ntrain_dim = args.dim\n\n# load the sample names\ndef read_samples(file_name):\n\t\twith open(file_name, 'r+') as f:\n\t\t\t\tlines = [fn.strip() for fn in f.readlines()]\n\t\treturn lines\n\ndef read_end_points(file_name):\n\t\tsample_dict = {}\n\t\twith open(file_name, 'r+') as f:\n\t\t\t\tfor line in f.readlines():\n\t\t\t\t\t\tsplits = line.strip().split(' ')\n\t\t\t\t\t\tsample_dict[splits[0]] = [int(splits[1]), 
int(splits[2])]\n\t\treturn sample_dict\n\nsample_dict = None\nif 'neuron' in args.dataset:\n\t\tsample_dict = read_end_points(os.path.join(DATA_DIR, 'range.txt'))\ntrain_fns = read_samples(os.path.join(DATA_DIR, 'train.txt'))\ntest_fns = read_samples(os.path.join(DATA_DIR, 'test.txt'))\ndata_dir = DATA_DIR + '/data'\nval_dim = 1760\n\n# classes for data loading and preprocessing\nclass Dataset:\n \"\"\"CamVid Dataset. Read images, apply augmentation and preprocessing transformations.\n \n Args:\n images_dir (str): path to images folder\n masks_dir (str): path to segmentation masks folder\n class_values (list): values of classes to extract from segmentation mask\n augmentation (albumentations.Compose): data transfromation pipeline \n (e.g. flip, scale, etc.)\n preprocessing (albumentations.Compose): data preprocessing \n (e.g. noralization, shape manipulation, etc.)\n \n \"\"\"\n \n def __init__(\n self, \n data_dir, \n sample_names,\n end_point_dict,\n fl_ch = None,\n scale = 1.0,\n channels = [3,3],\n augmentation=None, \n preprocessing=None,\n ):\n self.images_fps = []\n self.masks1_fps = []\n self.masks2_fps = []\n for sn in sample_names:\n \t\tsample_tag = 'T-' + sn.split('_')[3][5:]\n \t\tif end_point_dict:\n \t\t\t\tend1, end2 = end_point_dict[sample_tag]\n \t\telse:\n \t\t\t\tend1, end2 = 0, np.inf\n \t\tfns = os.listdir(os.path.join(data_dir, sn, 'phase'))\n \t\tfor fn in fns:\n \t\t\t\tif end1 <= int(fn.split('.')[0].split('-')[-1]) <= end2:\n \t\t\t\t\t\tself.images_fps.append(os.path.join(data_dir, sn, 'phase', fn))\n \t\t\t\t\t\tself.masks1_fps.append(os.path.join(data_dir, sn, 'fl1', fn))\n \t\t\t\t\t\tself.masks2_fps.append(os.path.join(data_dir, sn, 'fl2', fn))\n self.ids = self.images_fps\n print('Load files: image {}, fl1: {}, fl2:{}'.format(len(self.images_fps),len(self.masks1_fps),len(self.masks2_fps))) \n self.scale = scale\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n self.channels = channels\n self.fl_ch = fl_ch\n \n def __getitem__(self, i):\n \n # load image and fl1 or fl2 or both\n image = np.load(self.images_fps[i]) * 255.\n if self.fl_ch == 'fl1':\n mask = np.load(self.masks1_fps[i])\n mask = mask * self.scale\n elif self.fl_ch == 'fl2':\n mask = np.load(self.masks2_fps[i])\n mask = mask * self.scale \n elif self.fl_ch == 'fl12':\n mask1 = np.load(self.masks1_fps[i])\n mask2 = np.load(self.masks2_fps[i])\n mask = np.stack([mask1[:,:,1], mask2[:,:,1]], axis = -1)\n mask = mask*self.scale\n \n # decide the input and output channels\n if self.channels[0] == 1:\n image[:,:,0], image[:,:,2] = image[:,:,1], image[:,:,1]\n elif self.channels[0] == 2:\n image[:,:,2] = image[:,:,1]\t\t\n \n if self.channels[1] == 1 and not (self.fl_ch=='fl12'):\n mask = mask[:,:,1:2]\n\n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n\n # apply preprocessing\n if self.preprocessing:\n sample = self.preprocessing(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n\n return image, mask\n \n def __len__(self):\n return len(self.ids)\n \n\nclass Dataloder(tf.keras.utils.Sequence):\n \"\"\"Load data from dataset and form batches\n \n Args:\n dataset: instance of Dataset class for image loading and preprocessing.\n batch_size: Integet number of images in batch.\n shuffle: Boolean, if `True` shuffle image indexes each epoch.\n \"\"\"\n \n def __init__(self, dataset, batch_size=1, shuffle=False):\n self.dataset = dataset\n self.batch_size = 
batch_size\n self.shuffle = shuffle\n self.indexes = np.arange(len(dataset))\n\n self.on_epoch_end()\n\n def __getitem__(self, i):\n \n # collect batch data\n start = i * self.batch_size\n stop = (i + 1) * self.batch_size\n data = []\n for j in range(start, stop):\n data.append(self.dataset[j])\n \n # transpose list of lists\n batch = [np.stack(samples, axis=0) for samples in zip(*data)]\n \n return (batch[0], batch[1])\n \n def __len__(self):\n \"\"\"Denotes the number of batches per epoch\"\"\"\n return len(self.indexes) // self.batch_size\n \n def on_epoch_end(self):\n \"\"\"Callback function to shuffle indexes each epoch\"\"\"\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)\n\nimport albumentations as A\n\ndef round_clip_0_1(x, **kwargs):\n return x.round().clip(0, 1)\n\n# define heavy augmentations\ndef get_training_augmentation(dim, rot = 0):\n train_transform = [\n A.HorizontalFlip(p=0.5),\n A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),\n A.RandomCrop(height=dim, width=dim, always_apply=True),]\n return A.Compose(train_transform)\n\n\ndef get_validation_augmentation(dim = 992):\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\n test_transform = [\n A.PadIfNeeded(dim, dim)\n ]\n return A.Compose(test_transform)\n\ndef get_preprocessing(preprocessing_fn):\n \"\"\"Construct preprocessing transform\n \n Args:\n preprocessing_fn (callbale): data normalization function \n (can be specific for each pretrained neural network)\n Return:\n transform: albumentations.Compose\n \n \"\"\"\n _transform = [\n A.Lambda(image=preprocessing_fn),\n ]\n return A.Compose(_transform)\n\n## create models\nBACKBONE = args.backbone\nBATCH_SIZE = args.batch_size\nLR = args.lr\nEPOCHS = args.epoch\n\n# processing configuration\npreprocess_input = sm.get_preprocessing(BACKBONE)\n\n# define network parameters\nn_classes = args.ch_out if args.fl_ch == 'fl1' or args.fl_ch == 'fl2' else 2\nactivation = '{}'.format(args.act_fun)\n\n#create model\nnet_func = globals()[args.net_type]\n\nencoder_weights='imagenet' if args.pre_train else None\n\nmodel = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation)\n\n# define optomizer\noptim = tf.keras.optimizers.Adam(LR)\n\nif args.loss == 'mse':\n\tloss = tf.keras.losses.MSE\nelif args.loss == 'mae':\n\tloss = tf.keras.losses.MAE\nelif args.loss == 'huber':\n\tloss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.NONE)\n\nfrom tensorflow.keras import backend as K\ndef pearson(y_true, y_pred):\n x = y_true\n y = y_pred\n mx = K.mean(x)\n my = K.mean(y)\n xm, ym = x-mx, y-my\n r_num = K.sum(tf.multiply(xm,ym))\n r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))\n r = r_num / r_den\n return r\n\nmetrics = [sm.metrics.PSNR(max_val=args.scale), pearson]\n\n# compile keras model with defined optimozer, loss and metrics\nmodel.compile(optim, loss, metrics)\n\n# Dataset for train images\ntrain_dataset = Dataset(\n\t\tdata_dir = data_dir,\n\t\tsample_names = train_fns,\n\t\tend_point_dict = sample_dict,\n fl_ch = args.fl_ch,\n channels = [args.ch_in, args.ch_out],\n scale = args.scale,\n augmentation=get_training_augmentation(train_dim, args.rot),\n preprocessing=get_preprocessing(preprocess_input),\n)\n\n# Dataset for validation images\nvalid_dataset = Dataset(\n\t\tdata_dir = data_dir,\n\t\tsample_names = test_fns,\n\t\tend_point_dict = sample_dict,\n fl_ch = args.fl_ch,\n scale = args.scale,\n channels = [args.ch_in, args.ch_out],\n 
augmentation=get_validation_augmentation(val_dim),\n preprocessing=get_preprocessing(preprocess_input),\n)\n\ntrain_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\nvalid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)\n\nprint(train_dataloader[0][0].shape)\nprint(train_dataloader[0][1].shape)\nprint(train_dataloader[0][1].min(), train_dataloader[0][1].max())\n# check shapes for errors\nassert train_dataloader[0][0].shape == (BATCH_SIZE, train_dim, train_dim, 3)\nassert train_dataloader[0][1].shape == (BATCH_SIZE, train_dim, train_dim, n_classes)\n\nmodel_folder = '/data/2d_models/{}/{}'.format(args.dataset, model_name) if args.docker else './2d_models/{}/{}'.format(args.dataset, model_name)\ngenerate_folder(model_folder)\n\ndef concat_tile(im_list_2d):\n return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])\n\ndef save_images(file_name, vols):\n\t\tvols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]\n\t\tshp = vols.shape\n\t\tls, lx, ly = shp\n\t\tsx, sy = int(lx/128), int(ly/128)\n\t\tvols = vols[:,::sx,::sy]\n\t\tslice_list, rows = [], []\n\t\tfor si in range(vols.shape[0]):\n\t\t\t\tslice = vols[si,:,:]\n\t\t\t\tslice[0, :] = 255\n\t\t\t\tslice[:, 0] = 255\n\t\t\t\tslice[:, -1] = 255\n\t\t\t\tslice[-1, :] = 255\n\t\t\t\trows.append(slice)\n\t\t\t\tif si%8 == 7 and not si == vols.shape[0]-1:\n\t\t\t\t\t\tslice_list.append(rows)\n\t\t\t\t\t\trows = []\n\t\tsave_img = concat_tile(slice_list)\t\t\n\t\tcv2.imwrite(file_name, save_img)\n\nclass HistoryPrintCallback(tf.keras.callbacks.Callback):\n\t\tdef __init__(self):\n\t\t\t\tsuper(HistoryPrintCallback, self).__init__()\n\t\t\t\tself.history = {}\n\n\t\tdef on_epoch_end(self, epoch, logs=None):\n\t\t\t\tif logs:\n\t\t\t\t\t\tfor key in logs.keys():\n\t\t\t\t\t\t\t\tif epoch == 0:\n\t\t\t\t\t\t\t\t\t\tself.history[key] = []\n\t\t\t\t\t\t\t\tself.history[key].append(logs[key])\n\t\t\t\tif epoch%5 == 0:\n\t\t\t\t\t\tplot_history_for_callback(model_folder+'/train_history.png', self.history)\n\t\t\t\t\t\tsave_history_for_callback(model_folder, self.history)\n\t\t\t\t\t\timg_vols, gt_vols, pr_vols = [],[],[]\n\t\t\t\t\t\tfor i in range(0, len(valid_dataset),int(len(valid_dataset)/64)):\n\t\t\t\t\t\t\t\timg_vols.append(np.load(valid_dataloader.dataset.images_fps[i]))\n\t\t\t\t\t\t\t\tgt_vols.append(valid_dataloader[i][1])\n\t\t\t\t\t\t\t\tpr_vols.append(self.model.predict(valid_dataloader[i]))\n\t\t\t\t\t\timg_vols = np.stack(img_vols, axis = 0)\n\t\t\t\t\t\tgt_vols = np.concatenate(gt_vols, axis = 0)\n\t\t\t\t\t\tpr_vols = np.concatenate(pr_vols, axis = 0)\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-img.png'.format(epoch), np.uint8(img_vols))\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-gt.png'.format(epoch), gt_vols/args.scale*255)\n\t\t\t\t\t\tsave_images(model_folder+'/epoch-{}-pr.png'.format(epoch), pr_vols/args.scale*255)\n\n\nif not args.best_select:\n\t\tcallbacks = [\n\t\t\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/weights_{epoch:02d}.h5', save_weights_only=True, save_best_only=False, period=5),\n\t\t\t\ttf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),\n\t\t\t\tHistoryPrintCallback(),\n\t\t]\nelse:\n\t\tcallbacks = [\n\t\t\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', monitor='val_pearson', save_weights_only=True, save_best_only=True, mode='max'),\n\t\t\t\ttf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),\n\t\t\t\tHistoryPrintCallback(),\n\t\t]\n\n\n# train model\nhistory = 
model.fit_generator(\n train_dataloader, \n steps_per_epoch=len(train_dataloader), \n epochs=EPOCHS, \n callbacks=callbacks, \n validation_data=valid_dataloader, \n validation_steps=len(valid_dataloader),\n)\n\n# evaluate model; build the test split like valid_dataset above (the original positional args x_test_dir/y1_test_dir/y2_test_dir were never defined in this script)\ntest_dataset = Dataset(\n data_dir = data_dir, \n sample_names = test_fns,\n end_point_dict = sample_dict,\n fl_ch = args.fl_ch,\n channels = [args.ch_in, args.ch_out],\n scale = args.scale,\n augmentation=get_validation_augmentation(val_dim),\n preprocessing=get_preprocessing(preprocess_input),\n)",
"import os\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport segmentation_models_v1 as sm\nfrom segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, DUNet, BiFPN, Nestnet, ResUnet, AtUnet\nfrom unet_model import unet_std, unet_std2\nsm.set_framework('tf.keras')\n\nfrom helper_function import plot_deeply_history, plot_history, save_history\nfrom helper_function import precision, recall, f1_score\nfrom sklearn.metrics import confusion_matrix\nfrom helper_function import plot_history_for_callback, save_history_for_callback\n\ndef str2bool(value):\n return value.lower() == 'true'\n\ndef generate_folder(folder_name):\n\tif not os.path.exists(folder_name):\n\t\tos.system('mkdir -p {}'.format(folder_name))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--docker\", type=str2bool, default = True)\nparser.add_argument(\"--gpu\", type=str, default = '0')\nparser.add_argument(\"--net_type\", type=str, default = 'UNet') #Unet, Linknet, PSPNet, FPN\nparser.add_argument(\"--backbone\", type=str, default = 'efficientnetb3')\nparser.add_argument(\"--feat_version\", type=int, default = None)\nparser.add_argument(\"--epoch\", type=int, default = 2)\nparser.add_argument(\"--dim\", type=int, default = 512)\nparser.add_argument(\"--batch_size\", type=int, default = 2)\nparser.add_argument(\"--dataset\", type=str, default = 'live_dead')\nparser.add_argument(\"--ext\", type=str2bool, default = False)\nparser.add_argument(\"--upsample\", type=str, default = 'upsampling')\nparser.add_argument(\"--pyramid_agg\", type=str, default = 'sum')\nparser.add_argument(\"--filters\", type=int, default = 256)\nparser.add_argument(\"--rot\", type=float, default = 0)\nparser.add_argument(\"--lr\", type=float, default = 1e-3)\nparser.add_argument(\"--bk\", type=float, default = 0.5)\nparser.add_argument(\"--focal_weight\", type=float, default = 1)\nparser.add_argument(\"--pre_train\", type=str2bool, default = True)\nparser.add_argument(\"--train\", type=int, default = None)\nparser.add_argument(\"--loss\", type=str, default = 'focal+dice')\nparser.add_argument(\"--reduce_factor\", type=float, default = 0.1)\nargs = parser.parse_args()\nprint(args)\n\nmodel_name = 'single-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-ext-{}-loss-{}-up-{}-filters-{}-red_factor-{}-pyr_agg-{}-bk-{}-fl_weight-{}-fv-{}'.format(args.net_type,\\\n\t\t \targs.backbone, args.pre_train, args.epoch, args.batch_size, args.lr, args.dim,\\\n\t\t \targs.train, args.rot, args.dataset, args.ext, args.loss, args.upsample, args.filters, args.reduce_factor, args.pyramid_agg, args.bk, args.focal_weight, args.feat_version)\nprint(model_name)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\nif args.dataset == 'live_dead':\n\tval_dim = 832 if not args.net_type == 'PSPNet' else 864\n\ttest_dim = val_dim; img_dim = 832\n\ttrain_image_set = 'train_images2'\n\tval_image_set = 'val_images2'\n\ttest_image_set = 'test_images2'\nelif args.dataset == 'cell_cycle_1984_v2' or args.dataset == 'cell_cycle_1984':\n\tval_dim = 1984 if not args.net_type == 'PSPNet' else 2016\n\ttest_dim = val_dim; img_dim = 1984\n\ttrain_image_set = 'train_images'\n\tval_image_set = 'val_images'\n\ttest_image_set = 'test_images'\n\nDATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './data/{}'.format(args.dataset)\nx_train_dir = os.path.join(DATA_DIR, train_image_set) if not args.ext else os.path.join(DATA_DIR, 'ext_train_images')\ny_train_dir = os.path.join(DATA_DIR, 
'train_masks') if not args.ext else os.path.join(DATA_DIR, 'ext_train_masks')\n\nx_valid_dir = os.path.join(DATA_DIR, val_image_set)\ny_valid_dir = os.path.join(DATA_DIR, 'val_masks')\n\nx_test_dir = os.path.join(DATA_DIR, test_image_set)\ny_test_dir = os.path.join(DATA_DIR, 'test_masks')\n\nprint(x_train_dir); print(x_valid_dir); print(x_test_dir)\n# classes for data loading and preprocessing\nclass Dataset:\n \"\"\"CamVid Dataset. Read images, apply augmentation and preprocessing transformations.\n \n Args:\n images_dir (str): path to images folder\n masks_dir (str): path to segmentation masks folder\n class_values (list): values of classes to extract from segmentation mask\n augmentation (albumentations.Compose): data transfromation pipeline \n (e.g. flip, scale, etc.)\n preprocessing (albumentations.Compose): data preprocessing \n (e.g. noralization, shape manipulation, etc.)\n \n \"\"\"\n \n CLASSES = ['bk', 'live', 'inter', 'dead']\n \n def __init__(\n self, \n images_dir, \n masks_dir, \n classes=None,\n nb_data=None,\n augmentation=None, \n preprocessing=None,\n ):\n id_list = os.listdir(images_dir)\n if nb_data ==None:\n self.ids = id_list\n else:\n self.ids = id_list[:int(min(nb_data,len(id_list)))]\n #self.ids = os.listdir(images_dir)\n self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]\n self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]\n #print(self.images_fps[:4]); print(self.masks_fps[:4])\n print(len(self.images_fps)); print(len(self.masks_fps))\n # convert str names to class values on masks\n self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]\n \n self.augmentation = augmentation\n self.preprocessing = preprocessing\n \n def __getitem__(self, i):\n \n # read data\n image = cv2.imread(self.images_fps[i])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n mask = cv2.imread(self.masks_fps[i], 0)\n# print(np.unique(mask))\n # extract certain classes from mask (e.g. 
cars)\n masks = [(mask == v) for v in self.class_values]\n# print(self.class_values)\n mask = np.stack(masks, axis=-1).astype('float')\n \n # add background if mask is not binary\n if mask.shape[-1] != 1:\n background = 1 - mask.sum(axis=-1, keepdims=True)\n mask = np.concatenate((mask, background), axis=-1)\n \n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n \n # apply preprocessing\n if self.preprocessing:\n sample = self.preprocessing(image=image, mask=mask)\n image, mask = sample['image'], sample['mask']\n \n return image, mask\n \n def __len__(self):\n return len(self.ids)\n \n \nclass Dataloder(tf.keras.utils.Sequence):\n \"\"\"Load data from dataset and form batches\n \n Args:\n dataset: instance of Dataset class for image loading and preprocessing.\n batch_size: Integet number of images in batch.\n shuffle: Boolean, if `True` shuffle image indexes each epoch.\n \"\"\"\n \n def __init__(self, dataset, batch_size=1, shuffle=False):\n self.dataset = dataset\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.indexes = np.arange(len(dataset))\n\n self.on_epoch_end()\n\n def __getitem__(self, i):\n \n # collect batch data\n start = i * self.batch_size\n stop = (i + 1) * self.batch_size\n data = []\n for j in range(start, stop):\n data.append(self.dataset[j])\n \n # transpose list of lists\n batch = [np.stack(samples, axis=0) for samples in zip(*data)]\n# map_batch = batch[1]\n# map_batch_list = [map_batch]\n# for i in range(4):\n# map_batch_list.append(map_batch[:,::2,::2,:])\n# map_batch = map_batch[:,::2,::2,:]\n# map_batch_list.reverse()\n# map_tuple = ()\n# for i in range(5):\n# map_tuple = map_tuple+(map_batch_list[i],)\n return (batch[0], batch[1])\n \n def __len__(self):\n \"\"\"Denotes the number of batches per epoch\"\"\"\n return len(self.indexes) // self.batch_size\n \n def on_epoch_end(self):\n \"\"\"Callback function to shuffle indexes each epoch\"\"\"\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)\n\nimport albumentations as A\n\ndef round_clip_0_1(x, **kwargs):\n return x.round().clip(0, 1)\n\n# define heavy augmentations\ndef get_training_augmentation(dim = 512, rot_limit = 45):\n train_transform = [\n\n A.HorizontalFlip(p=0.5),\n\n\n A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=rot_limit, shift_limit=0.1, p=1, border_mode=0),\n\n A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),\n A.RandomCrop(height=dim, width=dim, always_apply=True),\n\n A.IAAAdditiveGaussianNoise(p=0.2),\n A.IAAPerspective(p=0.5),\n\n A.OneOf(\n [\n A.CLAHE(p=1),\n A.RandomBrightness(p=1),\n A.RandomGamma(p=1),\n ],\n p=0.9,\n ),\n\n A.OneOf(\n [\n A.IAASharpen(p=1),\n A.Blur(blur_limit=3, p=1),\n A.MotionBlur(blur_limit=3, p=1),\n ],\n p=0.9,\n ),\n\n A.OneOf(\n [\n A.RandomContrast(p=1),\n A.HueSaturationValue(p=1),\n ],\n p=0.9,\n ),\n A.Lambda(mask=round_clip_0_1)\n ]\n return A.Compose(train_transform)\n\n\ndef get_validation_augmentation(dim = 832):\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\n test_transform = [\n A.PadIfNeeded(dim, dim),\n A.RandomCrop(height=dim, width=dim, always_apply=True)\n# A.PadIfNeeded(384, 480)\n ]\n return A.Compose(test_transform)\n\ndef get_preprocessing(preprocessing_fn):\n \"\"\"Construct preprocessing transform\n \n Args:\n preprocessing_fn (callbale): data normalization function \n (can be specific for each pretrained neural network)\n Return:\n transform: 
albumentations.Compose\n \n \"\"\"\n \n _transform = [\n A.Lambda(image=preprocessing_fn),\n ]\n return A.Compose(_transform)\n\n\n# BACKBONE = 'efficientnetb3'\nBACKBONE = args.backbone\nBATCH_SIZE = args.batch_size\nCLASSES = ['live', 'inter', 'dead']\nLR = args.lr\nEPOCHS = args.epoch\n\npreprocess_input = sm.get_preprocessing(BACKBONE)\n\n# define network parameters\nn_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation\nactivation = 'sigmoid' if n_classes == 1 else 'softmax'\n\n#create model\nnet_func = globals()[args.net_type]\n\nencoder_weights='imagenet' if args.pre_train else None\n\nif args.net_type == 'PSPNet':\n\tmodel = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (args.dim, args.dim, 3), classes=n_classes, activation=activation)\nelif args.net_type == 'FPN':\n model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation, pyramid_aggregation = args.pyramid_agg) \nelif args.net_type == 'unet_std' or args.net_type == 'unet_std2':\n\t\tmodel = net_func(classes=n_classes, activation=activation)\nelse:\n model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation,\\\n \t\tdecoder_block_type = args.upsample, feature_version = args.feat_version,\\\n \t\tdecoder_filters=(int(args.filters),int(args.filters/2), int(args.filters/4), int(args.filters/8), int(args.filters/16)))\n print('{}'.format((int(args.filters),int(args.filters/2), int(args.filters/4), int(args.filters/8), int(args.filters/16))))\n# else:\n# model = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (args.dim, args.dim, 3), classes=n_classes, activation=activation)\n# model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)\n\n# define optomizer\noptim = tf.keras.optimizers.Adam(LR)\n\nclass_weights = [1,1,1,args.bk]\n# Segmentation models losses can be combined together by '+' and scaled by integer or float factor\n# set class weights for dice_loss (car: 1.; pedestrian: 2.; background: 0.5;)\nif args.loss =='focal+dice':\n\tdice_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))\n\tfocal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()\n\ttotal_loss = dice_loss + (args.focal_weight * focal_loss)\nelif args.loss =='dice':\n\ttotal_loss = sm.losses.DiceLoss(class_weights=np.array(class_weights))\nelif args.loss =='jaccard':\n\ttotal_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))\nelif args.loss =='focal+jaccard':\n\tdice_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))\n\tfocal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()\n\ttotal_loss = dice_loss + (args.focal_weight * focal_loss)\nelif args.loss =='focal+jaccard+dice':\n\tdice_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))\n\tjaccard_loss = sm.losses.JaccardLoss(class_weights=np.array(class_weights))\n\tfocal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()\n\ttotal_loss = dice_loss + jaccard_loss+ (args.focal_weight * focal_loss)\nelif args.loss == 'focal':\n\ttotal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()\nelif args.loss == 'ce':\n\ttotal_loss = sm.losses.BinaryCELoss() if n_classes == 1 else sm.losses.CategoricalCELoss()\nelif args.loss == 'wce':\n\t# weighted wce (bk, live, injured, dead)\n\t#ratios: 0.929, 0.01 , 0.056, 
0.004\n\tclass_weights = [1.08, 100., 17.86, 250.]\n\ttotal_loss = sm.losses.BinaryCELoss() if n_classes == 1 else sm.losses.CategoricalCELoss(class_weights=np.array(class_weights))\n# \tfocal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()\n# \ttotal_loss = dice_loss + (args.forcal_weight * focal_loss)\n# actulally total_loss can be imported directly from library, above example just show you how to manipulate with losses\n# total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss \n\nmetrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]\n\n# compile keras model with defined optimozer, loss and metrics\nmodel.compile(optimizer=optim, loss=total_loss, metrics = metrics)\n\n# Dataset for train images\ntrain_dataset = Dataset(\n x_train_dir, \n y_train_dir, \n classes=CLASSES,\n nb_data=args.train,\n augmentation=get_training_augmentation(args.dim, args.rot),\n preprocessing= None,\n)\n\nif args.net_type == 'PSPNet':\n val_dim = args.dim\n\n# Dataset for validation images\nvalid_dataset = Dataset(\n x_valid_dir, \n y_valid_dir, \n classes=CLASSES, \n augmentation=get_validation_augmentation(val_dim),\n preprocessing= None,\n)\n\ntrain_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\nvalid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)\n\nprint(train_dataloader[0][0].shape)\n# check shapes for errors\nassert train_dataloader[0][0].shape == (BATCH_SIZE, args.dim, args.dim, 3)\nassert train_dataloader[0][1].shape == (BATCH_SIZE, args.dim, args.dim, n_classes)\n\nmodel_folder = '/data/thesis_models/{}'.format(model_name) if args.docker else './models/thesis_models/{}'.format(model_name)\ngenerate_folder(model_folder)\n\ndef concat_tile(im_list_2d):\n return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])\n\ndef save_images(file_name, vols):\n# \t\tvols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]\n\t\tshp = vols.shape\n\t\tls, lx, ly, lc = shp\n\t\tsx, sy = int(lx/256), int(ly/256)\n\t\tvols = vols[:,::sx,::sy,:]\n\t\tslice_list, rows = [], []\n\t\tfor si in range(vols.shape[0]):\n\t\t\t\tslice = vols[si,:,:,:]\n\t\t\t\trows.append(slice)\n\t\t\t\tif si%4 == 3 and not si == vols.shape[0]-1:\n\t\t\t\t\t\tslice_list.append(rows)\n\t\t\t\t\t\trows = []\n\t\tsave_img = concat_tile(slice_list)\t\t\n\t\tcv2.imwrite(file_name, save_img)\n\ndef map2rgb(maps):\n\tshp = maps.shape\n\trgb_maps = np.zeros((shp[0], shp[1], shp[2], 3), dtype=np.uint8)\n\trgb_maps[:,:,:,0] = np.uint8((maps==0)*255)\n\trgb_maps[:,:,:,1] = np.uint8((maps==1)*255)\n\trgb_maps[:,:,:,2] = np.uint8((maps==2)*255)\n\treturn rgb_maps\n\t\nclass HistoryPrintCallback(tf.keras.callbacks.Callback):\n\t\tdef __init__(self):\n\t\t\t\tsuper(HistoryPrintCallback, self).__init__()\n\t\t\t\tself.history = {}\n\n\t\tdef on_epoch_end(self, epoch, logs=None):\n\t\t\t\tif logs:\n\t\t\t\t\t\tfor key in logs.keys():\n\t\t\t\t\t\t\t\tif epoch == 0:\n\t\t\t\t\t\t\t\t\t\tself.history[key] = []\n\t\t\t\t\t\t\t\tself.history[key].append(logs[key])\n\t\t\t\tif epoch%5 == 0:\n\t\t\t\t\t\tplot_history_for_callback(model_folder+'/train_history.png', self.history)\n\t\t\t\t\t\tsave_history_for_callback(model_folder, self.history)\n\t\t\t\t\t\tgt_vols, pr_vols = [],[]\n\t\t\t\t\t\tfor i in range(0, 
len(valid_dataset),int(len(valid_dataset)/36)):\n\t\t\t\t\t\t\t\tgt_vols.append(valid_dataloader[i][1])\n\t\t\t\t\t\t\t\tpr_vols.append(self.model.predict(valid_dataloader[i]))\n\t\t\t\t\t\tgt_vols = np.concatenate(gt_vols, axis = 0); gt_map = map2rgb(np.argmax(gt_vols,axis =-1))\n\t\t\t\t\t\tpr_vols = np.concatenate(pr_vols, axis = 0); pr_map = map2rgb(np.argmax(pr_vols,axis =-1))\n\t\t\t\t\t\tif epoch == 0:\n\t\t\t\t\t\t\t\tsave_images(model_folder+'/ground_truth.png'.format(epoch), gt_map)\n\t\t\t\t\t\tsave_images(model_folder+'/pr-{}.png'.format(epoch), pr_map)\n\n# define callbacks for learning rate scheduling and best checkpoints saving\nif args.reduce_factor < 1.0:\n\tcallbacks = [\n\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),\n\t\ttf.keras.callbacks.ReduceLROnPlateau(factor=args.reduce_factor),\n\t\tHistoryPrintCallback(),\n\t]\nelse:\n\tcallbacks = [\n\t\ttf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', save_weights_only=True, save_best_only=True, mode='min'),\n\t\tHistoryPrintCallback(),\n\t]\n\n# train model\nhistory = model.fit_generator(\n train_dataloader, \n steps_per_epoch=len(train_dataloader), \n epochs=EPOCHS, \n callbacks=callbacks, \n validation_data=valid_dataloader, \n validation_steps=len(valid_dataloader),\n)\n\n# save the training information\nplot_history(model_folder+'/train_history.png',history)\nrecord_dir = model_folder+'/train_dir'\ngenerate_folder(record_dir)\nsave_history(record_dir, history)\n\n# evaluate model\n# test_dataset = Dataset(\n# x_test_dir, \n# y_test_dir, \n# classes=CLASSES, \n# augmentation=get_validation_augmentation(test_dim),\n# preprocessing=get_preprocessing(preprocess_input),\n# )\n\n# evaluate model\ntest_dataset = Dataset(\n x_test_dir, \n y_test_dir, \n classes=CLASSES, \n augmentation=get_validation_augmentation(test_dim),\n preprocessing= None,\n)\n\ntest_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)\nif args.net_type == 'FPN':\n model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation, pyramid_aggregation = args.pyramid_agg)\nelse:\n\tmodel = net_func(BACKBONE, encoder_weights=encoder_weights, input_shape = (test_dim, test_dim, 3), classes=n_classes, activation=activation, feature_version = args.feat_version,)\nmodel.compile(optimizer=optim, loss=total_loss, metrics = metrics)\n\n# load best weights\nmodel.load_weights(model_folder+'/best_model.h5')\n\nscores = model.evaluate_generator(test_dataloader)\nprint(\"Loss: {:.5}\".format(scores[0]))\nfor metric, value in zip(metrics, scores[1:]):\n print(\"mean {}: {:.5}\".format(metric.__name__, value))\n\n# calculate the pixel-level classification performance\npr_masks = model.predict(test_dataloader); pr_maps = np.argmax(pr_masks,axis=-1)\ngt_masks = []\nfor i in range(len(test_dataset)):\n _, gt_mask = test_dataset[i];gt_masks.append(gt_mask)\ngt_masks = np.stack(gt_masks);gt_maps = np.argmax(gt_masks,axis=-1)\n\n# crop \nif args.net_type == 'PSPNet':\n\toffset1, offset2 = int((test_dim-img_dim)/2), val_dim-int((test_dim-img_dim)/2)\n\tgt_maps=gt_maps[:,offset1:offset2,offset1:offset2]\n\tpr_maps=pr_maps[:,offset1:offset2,offset1:offset2]\n\tprint('PSP output: {}'.format(pr_maps.shape))\n\ny_true=gt_maps.flatten(); y_pred = pr_maps.flatten()\ncf_mat = confusion_matrix(y_true, y_pred)\ncf_mat_reord = np.zeros(cf_mat.shape)\ncf_mat_reord[1:,1:]=cf_mat[:3,:3];cf_mat_reord[0,1:]=cf_mat[3,0:3]; 
cf_mat_reord[1:,0]=cf_mat[0:3,3]\ncf_mat_reord[0,0] = cf_mat[3,3]\nprint('Confusion matrix:')\nprint(cf_mat_reord)\nprec_scores = []; recall_scores = []; f1_scores = []; iou_scores=[]\nfor i in range(cf_mat.shape[0]):\n prec_scores.append(precision(i,cf_mat_reord))\n recall_scores.append(recall(i,cf_mat_reord))\n f1_scores.append(f1_score(i,cf_mat_reord))\nprint('Precision:{:.4f},{:.4f},{:.4f},{:.4f}'.format(prec_scores[0], prec_scores[1], prec_scores[2], prec_scores[3]))\nprint('Recall:{:.4f},{:.4f},{:.4f},{:.4f}'.format(recall_scores[0], recall_scores[1], recall_scores[2], recall_scores[3]))\n# f1 score\nprint('f1-score (pixel):{:.4f},{:.4f},{:.4f},{:.4f}'.format(f1_scores[0],f1_scores[1],f1_scores[2],f1_scores[3]))\nprint('mean f1-score (pixel):{:.4f}'.format(np.mean(f1_scores)))\n\nwith open(model_folder+'/metric_summary.txt','w+') as f:\n\t# save iou and dice\n\tfor metric, value in zip(metrics, scores[1:]):\n\t\tf.write(\"mean {}: {:.5}\\n\".format(metric.__name__, value))\n\t# save confusion matrix\n\tf.write('confusion matrix:\\n')\n\tnp.savetxt(f, cf_mat_reord, fmt='%-7d')\n\t# save precision\n\tf.write('precision:{:.4f},{:.4f},{:.4f},{:.4f}\\n'.format(prec_scores[0], prec_scores[1], prec_scores[2], prec_scores[3]))\n\tf.write('mean precision: {:.4f}\\n'.format(np.mean(prec_scores)))\n\t# save recall\n\tf.write('recall:{:.4f},{:.4f},{:.4f},{:.4f}\\n'.format(recall_scores[0], recall_scores[1], recall_scores[2], recall_scores[3]))\n\tf.write('mean recall:{:.4f}\\n'.format(np.mean(recall_scores)))\n\t# save f1-score\n\tf.write('f1-score (pixel):{:.4f},{:.4f},{:.4f},{:.4f}\\n'.format(f1_scores[0],f1_scores[1],f1_scores[2],f1_scores[3]))\n\tf.write('mean f1-score (pixel):{:.4f}\\n'.format(np.mean(f1_scores)))",
"from .base import Metric\nfrom .base import functional as F\nimport tensorflow as tf\n\nSMOOTH = 1e-5\n\n\nclass IOUScore(Metric):\n r\"\"\" The `Jaccard index`_, also known as Intersection over Union and the Jaccard similarity coefficient\n (originally coined coefficient de communauté by Paul Jaccard), is a statistic used for comparing the\n similarity and diversity of sample sets. The Jaccard coefficient measures similarity between finite sample sets,\n and is defined as the size of the intersection divided by the size of the union of the sample sets:\n\n .. math:: J(A, B) = \\frac{A \\cap B}{A \\cup B}\n\n Args:\n class_weights: 1. or ``np.array`` of class weights (``len(weights) = num_classes``).\n class_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.\n smooth: value to avoid division by zero\n per_image: if ``True``, metric is calculated as mean over images in batch (B),\n else over whole batch\n threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction will not be round\n\n Returns:\n A callable ``iou_score`` instance. Can be used in ``model.compile(...)`` function.\n\n .. _`Jaccard index`: https://en.wikipedia.org/wiki/Jaccard_index\n\n Example:\n\n .. code:: python\n\n metric = IOUScore()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n class_weights=None,\n class_indexes=None,\n threshold=None,\n per_image=False,\n smooth=SMOOTH,\n name=None,\n ):\n name = name or 'iou_score'\n super().__init__(name=name)\n self.class_weights = class_weights if class_weights is not None else 1\n self.class_indexes = class_indexes\n self.threshold = threshold\n self.per_image = per_image\n self.smooth = smooth\n\n def __call__(self, gt, pr):\n return F.iou_score(\n gt,\n pr,\n class_weights=self.class_weights,\n class_indexes=self.class_indexes,\n smooth=self.smooth,\n per_image=self.per_image,\n threshold=self.threshold,\n **self.submodules\n )\n\n\nclass FScore(Metric):\n r\"\"\"The F-score (Dice coefficient) can be interpreted as a weighted average of the precision and recall,\n where an F-score reaches its best value at 1 and worst score at 0.\n The relative contribution of ``precision`` and ``recall`` to the F1-score are equal.\n The formula for the F score is:\n\n .. math:: F_\\beta(precision, recall) = (1 + \\beta^2) \\frac{precision \\cdot recall}\n {\\beta^2 \\cdot precision + recall}\n\n The formula in terms of *Type I* and *Type II* errors:\n\n .. math:: L(tp, fp, fn) = \\frac{(1 + \\beta^2) \\cdot tp} {(1 + \\beta^2) \\cdot fp + \\beta^2 \\cdot fn + fp}\n\n where:\n - tp - true positives;\n - fp - false positives;\n - fn - false negatives;\n\n Args:\n beta: Integer of float f-score coefficient to balance precision and recall.\n class_weights: 1. or ``np.array`` of class weights (``len(weights) = num_classes``)\n class_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.\n smooth: Float value to avoid division by zero.\n per_image: If ``True``, metric is calculated as mean over images in batch (B),\n else over whole batch.\n threshold: Float value to round predictions (use ``>`` comparison), if ``None`` prediction will not be round.\n name: Optional string, if ``None`` default ``f{beta}-score`` name is used.\n\n Returns:\n A callable ``f_score`` instance. Can be used in ``model.compile(...)`` function.\n\n Example:\n\n .. 
code:: python\n\n metric = FScore()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n beta=1,\n class_weights=None,\n class_indexes=None,\n threshold=None,\n per_image=False,\n smooth=SMOOTH,\n name=None,\n ):\n name = name or 'f{}-score'.format(beta)\n super().__init__(name=name)\n self.beta = beta\n self.class_weights = class_weights if class_weights is not None else 1\n self.class_indexes = class_indexes\n self.threshold = threshold\n self.per_image = per_image\n self.smooth = smooth\n\n def __call__(self, gt, pr):\n return F.f_score(\n gt,\n pr,\n beta=self.beta,\n class_weights=self.class_weights,\n class_indexes=self.class_indexes,\n smooth=self.smooth,\n per_image=self.per_image,\n threshold=self.threshold,\n **self.submodules\n )\n\n\nclass Precision(Metric):\n r\"\"\"Creates a criterion that measures the Precision between the\n ground truth (gt) and the prediction (pr).\n\n .. math:: F_\\beta(tp, fp) = \\frac{tp} {(tp + fp)}\n\n where:\n - tp - true positives;\n - fp - false positives;\n\n Args:\n class_weights: 1. or ``np.array`` of class weights (``len(weights) = num_classes``).\n class_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.\n smooth: Float value to avoid division by zero.\n per_image: If ``True``, metric is calculated as mean over images in batch (B),\n else over whole batch.\n threshold: Float value to round predictions (use ``>`` comparison), if ``None`` prediction will not be round.\n name: Optional string, if ``None`` default ``precision`` name is used.\n\n Returns:\n A callable ``precision`` instance. Can be used in ``model.compile(...)`` function.\n\n Example:\n\n .. code:: python\n\n metric = Precision()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n class_weights=None,\n class_indexes=None,\n threshold=None,\n per_image=False,\n smooth=SMOOTH,\n name=None,\n ):\n name = name or 'precision'\n super().__init__(name=name)\n self.class_weights = class_weights if class_weights is not None else 1\n self.class_indexes = class_indexes\n self.threshold = threshold\n self.per_image = per_image\n self.smooth = smooth\n\n def __call__(self, gt, pr):\n return F.precision(\n gt,\n pr,\n class_weights=self.class_weights,\n class_indexes=self.class_indexes,\n smooth=self.smooth,\n per_image=self.per_image,\n threshold=self.threshold,\n **self.submodules\n )\n\n\nclass Recall(Metric):\n r\"\"\"Creates a criterion that measures the Recall between the\n ground truth (gt) and the prediction (pr).\n\n .. math:: F_\\beta(tp, fn) = \\frac{tp} {(tp + fn)}\n\n where:\n - tp - true positives;\n - fn - false negatives;\n\n Args:\n class_weights: 1. or ``np.array`` of class weights (``len(weights) = num_classes``).\n class_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.\n smooth: Float value to avoid division by zero.\n per_image: If ``True``, metric is calculated as mean over images in batch (B),\n else over whole batch.\n threshold: Float value to round predictions (use ``>`` comparison), if ``None`` prediction will not be round.\n name: Optional string, if ``None`` default ``recall`` name is used.\n\n Returns:\n A callable ``recall`` instance. Can be used in ``model.compile(...)`` function.\n\n Example:\n\n .. 
code:: python\n\n metric = Recall()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n class_weights=None,\n class_indexes=None,\n threshold=None,\n per_image=False,\n smooth=SMOOTH,\n name=None,\n ):\n name = name or 'recall'\n super().__init__(name=name)\n self.class_weights = class_weights if class_weights is not None else 1\n self.class_indexes = class_indexes\n self.threshold = threshold\n self.per_image = per_image\n self.smooth = smooth\n\n def __call__(self, gt, pr):\n return F.recall(\n gt,\n pr,\n class_weights=self.class_weights,\n class_indexes=self.class_indexes,\n smooth=self.smooth,\n per_image=self.per_image,\n threshold=self.threshold,\n **self.submodules\n )\n\nclass PSNR(Metric):\n r\"\"\"Creates a criterion that measures the PSNR between the\n ground truth (gt) and the prediction (pr).\n\n Args:\n\t\tmax_val: the maximal pixel value in the image\n\n Returns:\n A callable ``psnr`` instance. Can be used in ``model.compile(...)`` function.\n\n Example:\n\n .. code:: python\n\n metric = PSNR()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n max_val=None,\n name=None,\n ):\n name = name or 'psnr'\n super().__init__(name=name)\n self.max_val = max_val\n\n def __call__(self, gt, pr):\n return tf.image.psnr(gt, pr, max_val = self.max_val)\n\n\nclass Pearson(Metric):\n r\"\"\"Creates a criterion that measures the Pearson correlation coefficient between the\n ground truth (gt) and the prediction (pr).\n\n Args:\n\n Returns:\n A callable ``pearson`` instance. Can be used in ``model.compile(...)`` function.\n\n Example:\n\n .. code:: python\n\n metric = Pearson()\n model.compile('SGD', loss=loss, metrics=[metric])\n \"\"\"\n\n def __init__(\n self,\n name=None,\n ):\n name = name or 'pearson'\n super().__init__(name=name)\n\n def __call__(self, gt, pr):\n return tf.contrib.metrics.streaming_pearson_correlation(gt, pr)\n\n# aliases\niou_score = IOUScore()\nf1_score = FScore(beta=1)\nf2_score = FScore(beta=2)\nprecision = Precision()\nrecall = Recall()\npsnr = PSNR()\npearson = Pearson()\n"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.multiply",
"numpy.uint8",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"numpy.stack",
"numpy.concatenate",
"tensorflow.keras.losses.Huber",
"tensorflow.keras.backend.square",
"tensorflow.keras.optimizers.Adam",
"numpy.random.permutation",
"tensorflow.keras.backend.mean",
"numpy.load"
],
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.uint8",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"sklearn.metrics.confusion_matrix",
"numpy.stack",
"numpy.concatenate",
"tensorflow.keras.optimizers.Adam",
"numpy.argmax",
"numpy.mean",
"numpy.random.permutation",
"numpy.savetxt",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.contrib.metrics.streaming_pearson_correlation",
"tensorflow.image.psnr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
tetradsensors/tetrad-viz-toolkit | [
"908bed14e676143c4a0b1812d90aed6501479653"
] | [
"viztools/tools/snapshot.py"
] | [
"import numpy as np\nfrom viztools.tools import generate_image\n\nclass Snapshot:\n \"\"\"\n Formalized view of the data saved in our database. This just cleans it up\n and makes sure everything is correct before it can be used elsewhere. \n \"\"\"\n def __init__(self,\n lats,\n lons,\n alts,\n vals,\n vars,\n generate_img=False,\n opac95=3,\n opac05=12,\n colormap='auto',\n timestamp=None,\n param=\"PM2.5\"): # Placeholder, will change once estimate maps are for more metrics\n\n self.lats = np.array(lats).reshape(-1,)\n self.lons = np.array(lons).reshape(-1,)\n self.alts = np.array(alts)\n self.vals = np.array(vals)\n self.vars = np.array(vars)\n self.param = param\n self.timestamp = timestamp\n\n assert self.lats.shape[0] == self.vals.shape[1]\n assert self.lons.shape[0] == self.vals.shape[0]\n assert self.alts.shape == self.vals.shape == self.vars.shape\n\n if generate_img:\n # PIL.Image\n self.img = generate_image._snapshot_to_img_dist_scaled(self,\n largest_size=1300, \n scaling='epa', \n opac95=opac95, \n opac05=opac05,\n colormap=colormap)\n else:\n self.img = None\n\n\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mederrata/probability | [
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049",
"89d248c420b8ecabfd9d6de4a1aa8d3886920049"
] | [
"tensorflow_probability/python/vi/optimization_test.py",
"tensorflow_probability/python/distributions/kumaraswamy.py",
"tensorflow_probability/python/math/special.py",
"tensorflow_probability/python/optimizer/differential_evolution.py",
"tensorflow_probability/python/math/psd_kernels/internal/util.py",
"tensorflow_probability/python/distributions/independent.py",
"tensorflow_probability/python/optimizer/linesearch/hager_zhang.py",
"tensorflow_probability/python/bijectors/batch_normalization.py",
"spinoffs/inference_gym/inference_gym/targets/model.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for variational optimization.\"\"\"\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n\nJAX_MODE = False\n\n\n@test_util.test_all_tf_execution_regimes\nclass OptimizationTests(test_util.TestCase):\n\n @test_util.jax_disable_variable_test\n def test_variational_em(self):\n\n seed = test_util.test_seed()\n\n num_samples = 10000\n mu, sigma = 3., 5.\n x = test_util.test_np_rng().randn(num_samples) * sigma + mu\n\n # Test that the tape automatically picks up any trainable variables in\n # the model, even though it's just a function with no explicit\n # `.trainable_variables`\n likelihood_scale = tfp.util.TransformedVariable(\n 1., tfb.Softplus(), name='scale')\n def trainable_log_prob(z):\n lp = tfd.Normal(0., 1.).log_prob(z)\n lp += tf.reduce_sum(tfd.Normal(\n z[..., tf.newaxis], likelihood_scale).log_prob(x), axis=-1)\n return lp\n\n # For this simple normal-normal model, the true posterior is also normal.\n z_posterior_precision = (1./sigma**2 * num_samples + 1.**2)\n z_posterior_stddev = np.sqrt(1./z_posterior_precision)\n z_posterior_mean = (1./sigma**2 * num_samples * mu) / z_posterior_precision\n\n q_loc = tf.Variable(0., name='mu')\n q_scale = tfp.util.TransformedVariable(1., tfb.Softplus(), name='q_scale')\n q = tfd.Normal(q_loc, q_scale)\n loss_curve = tfp.vi.fit_surrogate_posterior(\n trainable_log_prob, q,\n num_steps=1000,\n sample_size=10,\n optimizer=tf.optimizers.Adam(0.1),\n seed=seed)\n self.evaluate(tf1.global_variables_initializer())\n with tf.control_dependencies([loss_curve]):\n final_q_loc = tf.identity(q.mean())\n final_q_scale = tf.identity(q.stddev())\n final_likelihood_scale = tf.convert_to_tensor(likelihood_scale)\n\n # We expect to recover the true posterior because the variational family\n # includes the true posterior, and the true parameters because we observed\n # a large number of sampled points.\n final_likelihood_scale_, final_q_loc_, final_q_scale_ = self.evaluate((\n final_likelihood_scale, final_q_loc, final_q_scale))\n self.assertAllClose(final_likelihood_scale_, sigma, atol=0.2)\n self.assertAllClose(final_q_loc_, z_posterior_mean, atol=0.2)\n self.assertAllClose(final_q_scale_, z_posterior_stddev, atol=0.1)\n\n @test_util.jax_disable_variable_test\n def test_importance_sampling_example(self):\n init_seed, opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=3)\n\n def log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n conditioned_log_prob = lambda z: log_prob(z, x=5.)\n\n q_z = tfp.experimental.util.make_trainable(tfd.Normal, seed=init_seed)\n # Fit `q` with an 
importance-weighted variational loss.\n loss_curve = tfp.vi.fit_surrogate_posterior(\n conditioned_log_prob,\n surrogate_posterior=q_z,\n importance_sample_size=10,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=opt_seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve = self.evaluate(loss_curve)\n\n # Estimate posterior statistics with importance sampling.\n zs, q_log_prob = self.evaluate(q_z.experimental_sample_and_log_prob(\n 1000, seed=eval_seed))\n self_normalized_log_weights = tf.nn.log_softmax(\n conditioned_log_prob(zs) - q_log_prob)\n posterior_mean = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * zs,\n axis=0)\n self.assertAllClose(posterior_mean, 2.5, atol=1e-1)\n\n posterior_variance = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,\n axis=0)\n self.assertAllClose(posterior_variance, 0.5, atol=1e-1)\n\n # Test reproducibility\n q_z_again = tfp.experimental.util.make_trainable(tfd.Normal, seed=init_seed)\n # Fit `q` with an importance-weighted variational loss.\n loss_curve_again = tfp.vi.fit_surrogate_posterior(\n conditioned_log_prob,\n surrogate_posterior=q_z_again,\n importance_sample_size=10,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=opt_seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve_again = self.evaluate(loss_curve_again)\n self.assertAllClose(loss_curve_again, loss_curve)\n\n @test_util.jax_disable_variable_test\n def test_fit_posterior_with_joint_q(self):\n\n # Target distribution: equiv to MVNFullCovariance(cov=[[1., 1.], [1., 2.]])\n def p_log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n\n # The Q family is a joint distribution that can express any 2D MVN.\n b = tf.Variable([0., 0.])\n l = tfp.util.TransformedVariable(tf.eye(2), tfb.FillScaleTriL())\n def trainable_q_fn():\n z = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(b[0], l[0, 0], name='z'))\n _ = yield tfd.Normal(b[1] + l[1, 0] * z, l[1, 1], name='x')\n q = tfd.JointDistributionCoroutine(trainable_q_fn)\n\n seed = test_util.test_seed()\n loss_curve = tfp.vi.fit_surrogate_posterior(\n p_log_prob, q, num_steps=1000, sample_size=100,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n seed=seed)\n self.evaluate(tf1.global_variables_initializer())\n loss_curve_ = self.evaluate((loss_curve))\n\n # Since the Q family includes the true distribution, the optimized\n # loss should be (approximately) zero.\n self.assertAllClose(loss_curve_[-1], 0., atol=0.1)\n\n @test_util.jax_disable_variable_test\n def test_inhomogeneous_poisson_process_example(self):\n # Toy 1D data.\n index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(\n [-1, 1]).astype(np.float32)\n observed_counts = np.array(\n [100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)\n\n # Trainable GP hyperparameters.\n kernel_log_amplitude = tf.Variable(0., name='kernel_log_amplitude')\n kernel_log_lengthscale = tf.Variable(0., name='kernel_log_lengthscale')\n observation_noise_log_scale = tf.Variable(\n 0., name='observation_noise_log_scale')\n\n # Generative model.\n def model_fn():\n kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(\n amplitude=tf.exp(kernel_log_amplitude),\n length_scale=tf.exp(kernel_log_lengthscale))\n latent_log_rates = yield tfd.JointDistributionCoroutine.Root(\n tfd.GaussianProcess(\n kernel,\n index_points=index_points,\n observation_noise_variance=tf.exp(observation_noise_log_scale),\n name='latent_log_rates'))\n yield 
tfd.Independent(\n tfd.Poisson(log_rate=latent_log_rates),\n reinterpreted_batch_ndims=1, name='y')\n model = tfd.JointDistributionCoroutine(model_fn, name='model')\n\n # Variational model.\n logit_locs = tf.Variable(tf.zeros(observed_counts.shape))\n logit_softplus_scales = tf.Variable(tf.ones(observed_counts.shape) * -1)\n def variational_model_fn():\n _ = yield tfd.JointDistributionCoroutine.Root(tfd.Independent(\n tfd.Normal(loc=logit_locs,\n scale=tf.nn.softplus(logit_softplus_scales)),\n reinterpreted_batch_ndims=1))\n _ = yield tfd.VectorDeterministic(observed_counts)\n q = tfd.JointDistributionCoroutine(variational_model_fn,\n name='variational_model')\n\n losses, sample_path = tfp.vi.fit_surrogate_posterior(\n target_log_prob_fn=lambda *args: model.log_prob(args),\n surrogate_posterior=q,\n optimizer=tf.optimizers.Adam(learning_rate=0.1),\n num_steps=100,\n seed=test_util.test_seed(),\n sample_size=1,\n trace_fn=lambda t: (t.loss, q.sample(seed=42)[0]))\n\n self.evaluate(tf1.global_variables_initializer())\n losses_, sample_path_ = self.evaluate((losses, sample_path))\n self.assertLess(losses_[-1], 80.) # Optimal loss is roughly 40.\n # Optimal latent logits are approximately the log observed counts.\n self.assertAllClose(sample_path_[-1], np.log(observed_counts), atol=1.0)\n\n\n@test_util.test_all_tf_execution_regimes\nclass StatelessOptimizationTests(test_util.TestCase):\n\n def test_importance_sampling_example(self):\n if not JAX_MODE:\n self.skipTest('Requires optax.')\n import optax # pylint: disable=g-import-not-at-top\n\n init_seed, opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=3)\n\n def log_prob(z, x):\n return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)\n conditioned_log_prob = lambda z: log_prob(z, x=5.)\n\n init_normal, build_normal = tfp.experimental.util.make_trainable_stateless(\n tfd.Normal)\n # Fit `q` with an importance-weighted variational loss.\n optimized_parameters, _ = tfp.vi.fit_surrogate_posterior_stateless(\n conditioned_log_prob,\n build_surrogate_posterior_fn=build_normal,\n initial_parameters=init_normal(seed=init_seed),\n importance_sample_size=10,\n optimizer=optax.adam(0.1),\n num_steps=200,\n seed=opt_seed)\n q_z = build_normal(*optimized_parameters)\n\n # Estimate posterior statistics with importance sampling.\n zs, q_log_prob = self.evaluate(q_z.experimental_sample_and_log_prob(\n 1000, seed=eval_seed))\n self_normalized_log_weights = tf.nn.log_softmax(\n conditioned_log_prob(zs) - q_log_prob)\n posterior_mean = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * zs,\n axis=0)\n self.assertAllClose(posterior_mean, 2.5, atol=1e-1)\n\n posterior_variance = tf.reduce_sum(\n tf.exp(self_normalized_log_weights) * (zs - posterior_mean)**2,\n axis=0)\n self.assertAllClose(posterior_variance, 0.5, atol=1e-1)\n\n def test_inhomogeneous_poisson_process_example(self):\n opt_seed, eval_seed = tfp.random.split_seed(\n test_util.test_seed(sampler_type='stateless'), n=2)\n\n # Toy 1D data.\n index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(\n [-1, 1]).astype(np.float32)\n observed_counts = np.array(\n [100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)\n\n # Generative model.\n def model_fn():\n kernel_amplitude = yield tfd.LogNormal(\n loc=0., scale=1., name='kernel_amplitude')\n kernel_lengthscale = yield tfd.LogNormal(\n loc=0., scale=1., name='kernel_lengthscale')\n observation_noise_scale = yield tfd.LogNormal(\n loc=0., scale=1., 
name='observation_noise_scale')\n kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(\n amplitude=kernel_amplitude,\n length_scale=kernel_lengthscale)\n latent_log_rates = yield tfd.GaussianProcess(\n kernel,\n index_points=index_points,\n observation_noise_variance=observation_noise_scale,\n name='latent_log_rates')\n yield tfd.Independent(tfd.Poisson(log_rate=latent_log_rates),\n reinterpreted_batch_ndims=1,\n name='y')\n model = tfd.JointDistributionCoroutineAutoBatched(model_fn)\n pinned = model.experimental_pin(y=observed_counts)\n\n initial_parameters = (0., 0., 0., # Raw kernel parameters.\n tf.zeros_like(observed_counts), # `logit_locs`\n tf.zeros_like(observed_counts)) # `logit_raw_scales`\n\n def build_surrogate_posterior_fn(\n raw_kernel_amplitude, raw_kernel_lengthscale,\n raw_observation_noise_scale,\n logit_locs, logit_raw_scales):\n\n def variational_model_fn():\n # Fit the kernel parameters as point masses.\n yield tfd.Deterministic(\n tf.nn.softplus(raw_kernel_amplitude), name='kernel_amplitude')\n yield tfd.Deterministic(\n tf.nn.softplus(raw_kernel_lengthscale), name='kernel_lengthscale')\n yield tfd.Deterministic(\n tf.nn.softplus(raw_observation_noise_scale),\n name='kernel_observation_noise_scale')\n # Factored normal posterior over the GP logits.\n yield tfd.Independent(\n tfd.Normal(loc=logit_locs,\n scale=tf.nn.softplus(logit_raw_scales)),\n reinterpreted_batch_ndims=1,\n name='latent_log_rates')\n return tfd.JointDistributionCoroutineAutoBatched(variational_model_fn)\n\n if not JAX_MODE:\n return\n import optax # pylint: disable=g-import-not-at-top\n\n [\n optimized_parameters,\n (losses, _, sample_path)\n ] = tfp.vi.fit_surrogate_posterior_stateless(\n target_log_prob_fn=pinned.unnormalized_log_prob,\n build_surrogate_posterior_fn=build_surrogate_posterior_fn,\n initial_parameters=initial_parameters,\n optimizer=optax.adam(learning_rate=0.1),\n sample_size=1,\n num_steps=500,\n trace_fn=lambda traceable_quantities: ( # pylint: disable=g-long-lambda\n traceable_quantities.loss,\n tf.nn.softplus(traceable_quantities.parameters[0]),\n build_surrogate_posterior_fn(\n *traceable_quantities.parameters).sample(seed=eval_seed)[-1]),\n seed=opt_seed)\n surrogate_posterior = build_surrogate_posterior_fn(*optimized_parameters)\n surrogate_posterior.sample(seed=eval_seed)\n\n losses_, sample_path_ = self.evaluate((losses, sample_path))\n self.assertLess(losses_[-1], 80.) # Optimal loss is roughly 40.\n # Optimal latent logits are approximately the log observed counts.\n self.assertAllClose(sample_path_[-1], np.log(observed_counts), atol=1.0)\n\n\nif __name__ == '__main__':\n test_util.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Kumaraswamy distribution class.\"\"\"\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import math as tfp_math\nfrom tensorflow_probability.python.bijectors import invert\nfrom tensorflow_probability.python.bijectors import kumaraswamy_cdf\nfrom tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.distributions import uniform\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import tensor_util\n\n__all__ = [\n 'Kumaraswamy',\n]\n\n_kumaraswamy_sample_note = \"\"\"Note: `x` must have dtype `self.dtype` and be in\n`[0, 1].` It must have a shape compatible with `self.batch_shape()`.\"\"\"\n\n\ndef _harmonic_number(x):\n \"\"\"Compute the harmonic number from its analytic continuation.\n\n Derivation from [here](\n https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)\n and [Euler's constant](\n https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).\n\n Args:\n x: input float.\n\n Returns:\n z: The analytic continuation of the harmonic number for the input.\n \"\"\"\n one = tf.ones([], dtype=x.dtype)\n return tf.math.digamma(x + one) - tf.math.digamma(one)\n\n\nclass Kumaraswamy(transformed_distribution.TransformedDistribution):\n \"\"\"Kumaraswamy distribution.\n\n The Kumaraswamy distribution is defined over the `(0, 1)` interval using\n parameters\n `concentration1` (aka 'alpha') and `concentration0` (aka 'beta'). 
It has a\n shape similar to the Beta distribution, but is easier to reparameterize.\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; alpha, beta) = alpha * beta * x**(alpha - 1) * (1 - x**alpha)**(beta -\n 1)\n ```\n\n where:\n\n * `concentration1 = alpha`,\n * `concentration0 = beta`,\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n #### Examples\n\n ```python\n # Create a batch of three Kumaraswamy distributions.\n alpha = [1, 2, 3]\n beta = [1, 2, 3]\n dist = Kumaraswamy(alpha, beta)\n\n dist.sample([4, 5]) # Shape [4, 5, 3]\n\n # `x` has three batch entries, each with two samples.\n x = [[.1, .4, .5],\n [.2, .3, .5]]\n # Calculate the probability of each pair of samples under the corresponding\n # distribution in `dist`.\n dist.prob(x) # Shape [2, 3]\n ```\n\n ```python\n # Create batch_shape=[2, 3] via parameter broadcast:\n alpha = [[1.], [2]] # Shape [2, 1]\n beta = [3., 4, 5] # Shape [3]\n dist = Kumaraswamy(alpha, beta)\n\n # alpha broadcast as: [[1., 1, 1,],\n # [2, 2, 2]]\n # beta broadcast as: [[3., 4, 5],\n # [3, 4, 5]]\n # batch_Shape [2, 3]\n dist.sample([4, 5]) # Shape [4, 5, 2, 3]\n\n x = [.2, .3, .5]\n # x will be broadcast as [[.2, .3, .5],\n # [.2, .3, .5]],\n # thus matching batch_shape [2, 3].\n dist.prob(x) # Shape [2, 3]\n ```\n\n \"\"\"\n\n def __init__(self,\n concentration1=1.,\n concentration0=1.,\n validate_args=False,\n allow_nan_stats=True,\n name='Kumaraswamy'):\n \"\"\"Initialize a batch of Kumaraswamy distributions.\n\n Args:\n concentration1: Positive floating-point `Tensor` indicating mean\n number of successes; aka 'alpha'. Implies `self.dtype` and\n `self.batch_shape`, i.e.,\n `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.\n concentration0: Positive floating-point `Tensor` indicating mean\n number of failures; aka 'beta'. Otherwise has same semantics as\n `concentration1`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([concentration1, concentration0],\n dtype_hint=tf.float32)\n concentration1 = tensor_util.convert_nonref_to_tensor(\n concentration1, name='concentration1', dtype=dtype)\n concentration0 = tensor_util.convert_nonref_to_tensor(\n concentration0, name='concentration0', dtype=dtype)\n self._kumaraswamy_cdf = kumaraswamy_cdf.KumaraswamyCDF(\n concentration1=concentration1,\n concentration0=concentration0,\n validate_args=validate_args)\n super(Kumaraswamy, self).__init__(\n distribution=uniform.Uniform(\n low=tf.zeros([], dtype=dtype),\n high=tf.ones([], dtype=dtype),\n allow_nan_stats=allow_nan_stats),\n bijector=invert.Invert(\n self._kumaraswamy_cdf, validate_args=validate_args),\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n # pylint: disable=g-long-lambda\n return dict(\n concentration1=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),\n concentration0=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))\n # pylint: enable=g-long-lambda\n\n @property\n def concentration1(self):\n \"\"\"Concentration parameter associated with a `1` outcome.\"\"\"\n return self._kumaraswamy_cdf.concentration1\n\n @property\n def concentration0(self):\n \"\"\"Concentration parameter associated with a `0` outcome.\"\"\"\n return self._kumaraswamy_cdf.concentration0\n\n experimental_is_sharded = False\n\n def _entropy(self):\n a = tf.convert_to_tensor(self.concentration1)\n b = tf.convert_to_tensor(self.concentration0)\n return ((1 - 1. / b) + (1 - 1. 
/ a) * _harmonic_number(b) -\n tf.math.log(a) - tf.math.log(b))\n\n def _log_cdf(self, x):\n return tfp_math.log1mexp(self.concentration0 * tf.math.log1p(\n -x ** self.concentration1))\n\n def _log_moment(self, n, concentration1=None, concentration0=None):\n \"\"\"Compute the n'th (uncentered) moment.\"\"\"\n concentration0 = tf.convert_to_tensor(\n self.concentration0) if concentration0 is None else concentration0\n concentration1 = tf.convert_to_tensor(\n self.concentration1) if concentration1 is None else concentration1\n total_concentration = concentration1 + concentration0\n expanded_concentration1 = tf.broadcast_to(concentration1,\n tf.shape(total_concentration))\n expanded_concentration0 = tf.broadcast_to(concentration0,\n tf.shape(total_concentration))\n beta_arg = 1 + n / expanded_concentration1\n return (tf.math.log(expanded_concentration0) +\n tfp_math.lbeta(beta_arg, expanded_concentration0))\n\n def _mean(self):\n return tf.exp(self._log_moment(1))\n\n def _variance(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n log_moment2 = self._log_moment(\n 2, concentration1=concentration1, concentration0=concentration0)\n log_moment1 = self._log_moment(\n 1, concentration1=concentration1, concentration0=concentration0)\n lswe, sign = tfp_math.reduce_weighted_logsumexp(\n tf.stack([log_moment2, 2 * log_moment1], axis=-1),\n [1., -1],\n axis=-1,\n return_sign=True)\n return sign * tf.exp(lswe)\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: The mode is undefined when `concentration1 <= 1` or\n `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`\n is used for undefined modes. If `self.allow_nan_stats` is `False` an\n exception is raised when one or more modes are undefined.\"\"\")\n def _mode(self):\n a = tf.convert_to_tensor(self.concentration1)\n b = tf.convert_to_tensor(self.concentration0)\n mode = ((a - 1) / (a * b - 1))**(1. / a)\n if self.allow_nan_stats:\n return tf.where((a > 1.) & (b > 1.), mode,\n dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n\n return distribution_util.with_dependencies([\n assert_util.assert_less(\n tf.ones([], dtype=a.dtype),\n a,\n message='Mode undefined for concentration1 <= 1.'),\n assert_util.assert_less(\n tf.ones([], dtype=b.dtype),\n b,\n message='Mode undefined for concentration0 <= 1.')\n ], mode)\n\n def _default_event_space_bijector(self):\n return sigmoid_bijector.Sigmoid(validate_args=self.validate_args)\n\n def _parameter_control_dependencies(self, is_init):\n return self.bijector.bijector._parameter_control_dependencies(is_init) # pylint: disable=protected-access\n\n def _sample_control_dependencies(self, x):\n \"\"\"Checks the validity of a sample.\"\"\"\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_non_negative(\n x, message='Sample must be non-negative.'))\n assertions.append(assert_util.assert_less_equal(\n x, tf.ones([], x.dtype),\n message='Sample must be less than or equal to `1`.'))\n return assertions\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Implements special functions in TensorFlow.\"\"\"\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.internal import custom_gradient as tfp_custom_gradient\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n 'atan_difference',\n 'dawsn',\n 'erfcinv',\n 'erfcx',\n 'igammainv',\n 'igammacinv',\n 'round_exponential_bump_function',\n 'lambertw',\n 'lambertw_winitzki_approx',\n 'logerfc',\n 'logerfcx',\n 'log_gamma_correction',\n 'log_gamma_difference',\n 'lbeta',\n 'owens_t',\n]\n\n\ndef atan_difference(x, y, name=None):\n \"\"\"Difference of arctan(x) and arctan(y).\n\n Computes arctan(x) - arctan(y) avoiding catastrophic cancellation. This is\n by resorting to the identity:\n\n ```none\n arctan(x) - arctan(y) = arctan((x - y) / (1 + x * y)) +\n pi * sign(x) * 1_{x * y < -1)\n ```\n\n where `1_A` is the indicator function on the set `A`.\n\n For a derivation of this fact, see [1].\n\n\n #### References\n [1] De Stefano, Sum of Arctangents\n https://sites.google.com/site/micdestefano/mathematics/trigonometry/sum-of-arctangents\n\n Args:\n x: Floating-point Tensor. Should be broadcastable with `y`.\n y: Floating-point Tensor. 
Should be broadcastable with `x`.\n name: Optional Python `str` naming the operation.\n\n Returns:\n z: Tensor of same shape and dtype as `x` and `y`.\n \"\"\"\n with tf.name_scope(name or 'atan_difference'):\n dtype = dtype_util.common_dtype([x, y], tf.float32)\n x = tf.convert_to_tensor(x, dtype=dtype)\n y = tf.convert_to_tensor(y, dtype=dtype)\n\n difference = tf.math.atan((x - y) / (1 + x * y))\n difference = difference + tf.where(\n x * y < - 1., np.pi * tf.math.sign(x), 0.)\n difference = tf.where(\n tf.math.equal(x * y, -1.), np.pi * tf.math.sign(x) / 2., difference)\n\n return difference\n\n\ndef _dawsn_naive(x):\n \"\"\"Returns the Dawson Integral computed at x elementwise.\"\"\"\n dtype = dtype_util.common_dtype([x], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n x = tf.convert_to_tensor(x, dtype=dtype)\n\n n1 = [\n 1.13681498971755972054E-11,\n 8.49262267667473811108E-10,\n 1.94434204175553054283E-8,\n 9.53151741254484363489E-7,\n 3.07828309874913200438E-6,\n 3.52513368520288738649E-4,\n -8.50149846724410912031E-4,\n 4.22618223005546594270E-2,\n -9.17480371773452345351E-2,\n 9.99999999999999994612E-1]\n\n d1 = [\n 2.40372073066762605484E-11,\n 1.48864681368493396752E-9,\n 5.21265281010541664570E-8,\n 1.27258478273186970203E-6,\n 2.32490249820789513991E-5,\n 3.25524741826057911661E-4,\n 3.48805814657162590916E-3,\n 2.79448531198828973716E-2,\n 1.58874241960120565368E-1,\n 5.74918629489320327824E-1,\n 1.00000000000000000539E0]\n\n n2 = [\n 5.08955156417900903354E-1,\n -2.44754418142697847934E-1,\n 9.41512335303534411857E-2,\n -2.18711255142039025206E-2,\n 3.66207612329569181322E-3,\n -4.23209114460388756528E-4,\n 3.59641304793896631888E-5,\n -2.14640351719968974225E-6,\n 9.10010780076391431042E-8,\n -2.40274520828250956942E-9,\n 3.59233385440928410398E-11]\n\n d2 = [\n 1.00000000000000000000E0,\n -6.31839869873368190192E-1,\n 2.36706788228248691528E-1,\n -5.31806367003223277662E-2,\n 8.48041718586295374409E-3,\n -9.47996768486665330168E-4,\n 7.81025592944552338085E-5,\n -4.55875153252442634831E-6,\n 1.89100358111421846170E-7,\n -4.91324691331920606875E-9,\n 7.18466403235734541950E-11]\n\n n3 = [\n -5.90592860534773254987E-1,\n 6.29235242724368800674E-1,\n -1.72858975380388136411E-1,\n 1.64837047825189632310E-2,\n -4.86827613020462700845E-4]\n\n d3 = [\n 1.00000000000000000000E0,\n -2.69820057197544900361E0,\n 1.73270799045947845857E0,\n -3.93708582281939493482E-1,\n 3.44278924041233391079E-2,\n -9.73655226040941223894E-4]\n\n n1, d1, n2, d2, n3, d3 = [\n [numpy_dtype(c) for c in lst] for lst in (n1, d1, n2, d2, n3, d3)]\n\n abs_x = tf.math.abs(x)\n\n result_small = abs_x * tf.math.polyval(\n n1, tf.math.square(x)) / tf.math.polyval(d1, tf.math.square(x))\n result_small = tf.math.sign(x) * result_small\n\n inv_xsq = tf.math.reciprocal(tf.math.square(x))\n result_medium = tf.math.reciprocal(abs_x) + inv_xsq * (\n tf.math.polyval(n2, inv_xsq) / (abs_x * tf.math.polyval(d2, inv_xsq)))\n result_medium = 0.5 * tf.math.sign(x) * result_medium\n\n result_very_large = 0.5 * tf.math.sign(x) * tf.math.reciprocal(abs_x)\n\n result_large = tf.math.reciprocal(abs_x) + inv_xsq * (\n tf.math.polyval(n3, inv_xsq) / (abs_x * tf.math.polyval(d3, inv_xsq)))\n result_large = 0.5 * tf.math.sign(x) * result_large\n\n return tf.where(\n abs_x < 3.25,\n result_small,\n tf.where(\n abs_x < 6.25,\n result_medium,\n tf.where(\n abs_x > 1e9,\n result_very_large,\n result_large)))\n\n\ndef _dawsn_fwd(x):\n \"\"\"Compute output, aux (collaborates with _dawsn_bwd).\"\"\"\n output = 
_dawsn_naive(x)\n return output, (x,)\n\n\ndef _dawsn_bwd(aux, g):\n \"\"\"Reverse mode impl for dawsn.\"\"\"\n x, = aux\n y = _dawsn_custom_gradient(x)\n return g * (1. - 2 * x * y)\n\n\ndef _dawsn_jvp(primals, tangents):\n \"\"\"Computes JVP for dawsn (supports JAX custom derivative).\"\"\"\n x, = primals\n dx, = tangents\n\n y = _dawsn_custom_gradient(x)\n return y, dx * (1. - 2 * x * y)\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_dawsn_fwd,\n vjp_bwd=_dawsn_bwd,\n jvp_fn=_dawsn_jvp)\ndef _dawsn_custom_gradient(x):\n return _dawsn_naive(x)\n\n\ndef dawsn(x, name=None):\n \"\"\"Computes Dawson's integral element-wise.\n\n Dawson's integral is defined as `exp(-x**2) * int_0^x exp(t**2)`\n with the domain of definition all real numbers.\n\n This implementation is based on the Cephes math library.\n\n Args:\n x: A Tensor with type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n dawsn: dawsn evaluated at `x`. A Tensor with the same shape and same\n dtype as `x`.\n \"\"\"\n with tf.name_scope(name or 'dawsn'):\n return _dawsn_custom_gradient(x)\n\n\ndef erfcinv(z, name=None):\n \"\"\"Computes the inverse of `tf.math.erfc` of `z` element-wise.\n\n NOTE: This is mathematically equivalent to computing `erfinv(1 - x)`\n however is more numerically stable.\n\n Args:\n z: A Tensor with type `float32` or `float64`.\n name: A name for the operation (optional).\n\n Returns:\n erfcinv: erfcinv evaluated at `z`. A Tensor with the same shape and same\n dtype as `z`.\n \"\"\"\n with tf.name_scope(name or 'erfcinv'):\n z = tf.convert_to_tensor(z)\n np_dtype = dtype_util.as_numpy_dtype(z.dtype)\n return -tf.math.ndtri(0.5 * z) * np.sqrt(np_dtype(0.5))\n\n\ndef _erfcx_naive(x):\n \"\"\"Compute erfcx using a Chebyshev expansion.\"\"\"\n # The implementation is based on\n # [1] M. Shepherd and J. Laframboise,\n # Chebyshev approximation of (1 + 2 * x) * exp(x**2) * erfc(x)\n # https://www.ams.org/journals/mcom/1981-36-153/S0025-5718-1981-0595058-X/\n\n dtype = dtype_util.common_dtype([x], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n x = tf.convert_to_tensor(x, dtype=dtype)\n x_abs = tf.math.abs(x)\n # TODO(b/180390310): The approximation quality can be made better by sweeping\n # the shift parameter '3.75'.\n y = (x_abs - 3.75) / (x_abs + 3.75)\n\n # The list of coefficients is taken from [1].\n coeff = [\n 3e-21,\n 9.7e-20,\n 2.7e-20,\n -2.187e-18,\n -2.237e-18,\n 5.0681e-17,\n 7.4182e-17,\n -1.250795e-15,\n -1.864563e-15,\n 3.33478119e-14,\n 3.2525481e-14,\n -9.65469675e-13,\n 1.94558685e-13,\n 2.8687950109e-11,\n -6.3180883409e-11,\n -7.75440020883e-10,\n 4.521959811218e-09,\n 1.0764999465671e-08,\n -2.18864010492344e-07,\n 7.74038306619849e-07,\n 4.139027986073010e-06,\n -6.9169733025012064e-05,\n 4.90775836525808632e-04,\n -2.413163540417608191e-03,\n 9.074997670705265094e-03,\n -2.6658668435305752277e-02,\n 5.9209939998191890498e-02,\n -8.4249133366517915584e-02,\n -4.590054580646477331e-03,\n 1.177578934567401754080,\n ]\n\n result = -4e-21\n previous_result = 0.\n for i in range(len(coeff) - 1):\n result, previous_result = (\n 2 * y * result - previous_result + coeff[i], result)\n result = y * result - previous_result + coeff[len(coeff) - 1]\n\n result = result / (1. + 2. * x_abs)\n\n # The approximation is only valid for positive x, so flip the integral.\n # TODO(b/180390310): Improve this approximation for negative values.\n result = tf.where(\n x < 0., 2. 
* tf.math.exp(tf.math.square(x)) - result, result)\n  result = tf.where(tf.math.equal(x, np.inf), numpy_dtype(1.), result)\n  return result\n\n\ndef _erfcx_fwd(x):\n  \"\"\"Compute output, aux (collaborates with _erfcx_bwd).\"\"\"\n  output = _erfcx_naive(x)\n  return output, (x,)\n\n\ndef _erfcx_bwd(aux, g):\n  x, = aux\n  y = _erfcx_custom_gradient(x)\n  numpy_dtype = dtype_util.as_numpy_dtype(\n      dtype_util.common_dtype([x], tf.float32))\n  px = 2. * x * y - numpy_dtype(2. / np.sqrt(np.pi))\n  return [px * g]\n\n\ndef _erfcx_jvp(primals, tangents):\n  \"\"\"Computes JVP for erfcx (supports JAX custom derivative).\"\"\"\n  x, = primals\n  dx, = tangents\n\n  y = _erfcx_custom_gradient(x)\n  numpy_dtype = dtype_util.as_numpy_dtype(\n      dtype_util.common_dtype([x], tf.float32))\n  px = 2. * x * y - numpy_dtype(2. / np.sqrt(np.pi))\n  return y, px * dx\n\n\n@tfp_custom_gradient.custom_gradient(\n    vjp_fwd=_erfcx_fwd,\n    vjp_bwd=_erfcx_bwd,\n    jvp_fn=_erfcx_jvp)\ndef _erfcx_custom_gradient(x):\n  \"\"\"Computes Erfcx(x) with correct custom gradient.\"\"\"\n  return _erfcx_naive(x)\n\n\ndef erfcx(x, name=None):\n  \"\"\"Computes the scaled complementary error function exp(x**2) * erfc(x).\n\n  # References\n  [1] M. Shepherd and J. Laframboise,\n      Chebyshev approximation of (1 + 2 * x) * exp(x**2) * erfc(x)\n      https://www.ams.org/journals/mcom/1981-36-153/S0025-5718-1981-0595058-X/\n\n  Args:\n    x: A Tensor with type `float32` or `float64`.\n    name: A name for the operation (optional).\n\n  Returns:\n    erfcx: erfcx(x) evaluated at `x`. A Tensor with the same shape and same\n      dtype as `x`.\n  \"\"\"\n  with tf.name_scope(name or 'erfcx'):\n    dtype = dtype_util.common_dtype([x], tf.float32)\n    x = tf.convert_to_tensor(x, dtype=dtype)\n    return _erfcx_custom_gradient(x)\n\n\ndef logerfc(x, name=None):\n  \"\"\"Computes the logarithm of `tf.math.erfc` of `x` element-wise.\n\n  NOTE: This is mathematically equivalent to computing `log(erfc(x))`\n  however is more numerically stable.\n\n  Args:\n    x: A Tensor with type `float32` or `float64`.\n    name: A name for the operation (optional).\n\n  Returns:\n    logerfc: log(erfc(x)) evaluated at `x`. A Tensor with the same shape and\n      same dtype as `x`.\n  \"\"\"\n  with tf.name_scope(name or 'logerfc'):\n    dtype = dtype_util.common_dtype([x], tf.float32)\n    x = tf.convert_to_tensor(x, dtype=dtype)\n    safe_positive_x = tf.where(x >= 0., x, 1.)\n    safe_negative_x = tf.where(x < 0., x, -1.)\n    return tf.where(\n        x < 0.,\n        tf.math.log(tf.math.erfc(safe_negative_x)),\n        # erfcx saturates to zero much slower than erfc.\n        tf.math.log(erfcx(safe_positive_x)) - tf.math.square(safe_positive_x))\n\n\ndef logerfcx(x, name=None):\n  \"\"\"Computes the logarithm of `tfp.math.erfcx` of `x` element-wise.\n\n  NOTE: This is mathematically equivalent to computing `log(erfcx(x))`\n  however is more numerically stable.\n\n  Args:\n    x: A Tensor with type `float32` or `float64`.\n    name: A name for the operation (optional).\n\n  Returns:\n    logerfcx: log(erfcx(x)) evaluated at `x`. A Tensor with the same shape and\n      same dtype as `x`.\n  \"\"\"\n  with tf.name_scope(name or 'logerfcx'):\n    dtype = dtype_util.common_dtype([x], tf.float32)\n    x = tf.convert_to_tensor(x, dtype=dtype)\n    safe_positive_x = tf.where(x >= 0., x, 1.)\n    safe_negative_x = tf.where(x < 0., x, -1.)\n    return tf.where(\n        x < 0.,\n        # erfcx goes to infinity fast in the left tail.\n        tf.math.log(\n            tf.math.erfc(safe_negative_x)) + tf.math.square(safe_negative_x),\n        tf.math.log(erfcx(safe_positive_x)))\n\n\n# Implementation of Inverse Incomplete Gamma based on\n# A. 
Didonato and A. Morris,\n# Computation of the Incomplete Gamma Function Ratios and their Inverse\n# https://dl.acm.org/doi/10.1145/22721.23109\n\n\ndef _didonato_eq_twenty_three(log_b, v, a):\n return -log_b + tf.math.xlogy(a - 1., v) - tf.math.log1p((1. - a) / (1. + v))\n\n\ndef _didonato_eq_thirty_two(p, q):\n \"\"\"Compute Equation 32 from Didonato's paper.\"\"\"\n dtype = dtype_util.common_dtype([p, q], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n numerator_coeffs = [\n 0.213623493715853, 4.28342155967104, 11.6616720288968, 3.31125922108741]\n numerator_coeffs = [numpy_dtype(c) for c in numerator_coeffs]\n denominator_coeffs = [\n 0.36117081018842e-1, 1.27364489782223, 6.40691597760039,\n 6.61053765625462, 1.]\n denominator_coeffs = [numpy_dtype(c) for c in denominator_coeffs]\n t = tf.where(\n p < 0.5,\n tf.math.sqrt(-2 * tf.math.log(p)),\n tf.math.sqrt(-2. * tf.math.log(q)))\n result = (t - tf.math.polyval(numerator_coeffs, t) / tf.math.polyval(\n denominator_coeffs, t))\n return tf.where(p < 0.5, -result, result)\n\n\ndef _didonato_eq_thirty_four(a, x):\n \"\"\"Compute Equation 34 from Didonato's paper.\"\"\"\n # This function computes `S_n` in equation thirty four.\n dtype = dtype_util.common_dtype([a, x], tf.float32)\n\n # TODO(b/178793508): Change this tolerance to be dtype dependent.\n tolerance = 1e-4\n\n def _taylor_series(should_stop, index, partial, series_sum):\n partial = partial * x / (a + index)\n series_sum = tf.where(should_stop, series_sum, series_sum + partial)\n # TODO(b/178793508): Change the number of iterations to be dtype dependent.\n should_stop = (partial < tolerance) | (index > 100)\n return should_stop, index + 1, partial, series_sum\n\n _, _, _, series_sum = tf.while_loop(\n cond=lambda stop, *_: tf.reduce_any(~stop),\n body=_taylor_series,\n loop_vars=(\n tf.zeros_like(a + x, dtype=tf.bool),\n tf.cast(1., dtype=dtype),\n tf.ones_like(a + x, dtype=dtype),\n tf.ones_like(a + x, dtype=dtype)))\n return series_sum\n\n\ndef _didonato_eq_twenty_five(a, y):\n \"\"\"Compute Equation 25 from Didonato's paper.\"\"\"\n c1 = tf.math.xlogy(a - 1., y)\n c1_sq = tf.math.square(c1)\n c1_cub = c1_sq * c1\n c1_fourth = tf.math.square(c1_sq)\n a_sq = tf.math.square(a)\n a_cub = a_sq * a\n c2 = (a - 1.) * (1. + c1)\n c3 = (a - 1.) * ((3. * a - 5.) / 2. + c1 * (a - 2. - c1 / 2.))\n c4 = (a - 1.) * (\n (c1_cub / 3.) - (3. * a - 5.) * c1_sq / 2. +\n (a_sq - 6. * a + 7.) * c1 + (11. * a_sq - 46. * a + 47.) / 6.)\n c5 = ((a - 1.) * (-c1_fourth / 4. +\n (11. * a - 17.) * c1_cub / 6 +\n (-3. * a_sq + 13. * a - 13.) * c1_sq +\n (2. * a_cub - 25. * a_sq + 72. * a - 61.) * c1 / 2. +\n (25. * a_cub - 195. * a_sq + 477 * a - 379) / 12.))\n return y + c1 + (((c5 / y + c4) / y + c3 / y) + c2) / y\n\n\ndef _inverse_igamma_initial_approx(a, p, q, use_p_for_logq=True):\n \"\"\"Compute an initial guess for `igammainv(a, p)`.\n\n Compute an initial estimate of `igammainv(a, p)`. This will be further\n refined by Newton-Halley iterations.\n\n Args:\n a: A positive `float` `Tensor`. Must be broadcastable with `p`.\n p: A `float` `Tensor` whose entries lie in `[0, 1]`.\n Must be broadcastable with `a`. This is `1 - q`.\n q: A `float` `Tensor` whose entries lie in `[0, 1]`.\n Must be broadcastable with `a`. 
This is `1 - p`.\n    use_p_for_logq: `bool` describing whether to compute\n      `log(q)` by using `log(1 - p)` or `log(q)`.\n      Default value: `True`.\n\n  Returns:\n    igamma_approx: Approximation to `igammainv(a, p)`.\n  \"\"\"\n\n  dtype = dtype_util.common_dtype([a, p, q], tf.float32)\n  numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n  a = tf.convert_to_tensor(a, dtype=dtype)\n  p = tf.convert_to_tensor(p, dtype=dtype)\n  q = tf.convert_to_tensor(q, dtype=dtype)\n\n  lgamma_a = tf.math.lgamma(a)\n\n  # This ensures that computing log(1 - p) avoids roundoff errors. This is\n  # needed since igammacinv and igammainv both use this codepath,\n  # switching p and q.\n  if use_p_for_logq:\n    log_q = tf.math.log1p(-p)\n  else:\n    log_q = tf.math.log(q)\n\n  log_b = log_q + lgamma_a\n\n  result = _didonato_eq_twenty_five(a, -log_b)\n\n  # The code below is for when a < 1.\n\n  v = -log_b - (1. - a) * tf.math.log(-log_b)\n  v_sq = tf.math.square(v)\n\n  # This is Equation 24.\n  result = tf.where(\n      log_b > np.log(0.01),\n      -log_b - (1. - a) * tf.math.log(v) - tf.math.log(\n          (v_sq + 2. * (3. - a) * v + (2. - a) * (3 - a)) /\n          (v_sq + (5. - a) * v + 2.)),\n      result)\n\n  result = tf.where(\n      log_b >= np.log(0.15),\n      _didonato_eq_twenty_three(log_b, v, a),\n      result)\n\n  t = tf.math.exp(-np.euler_gamma - tf.math.exp(log_b))\n  u = t * tf.math.exp(t)\n  result = tf.where(\n      (a < 0.3) & (log_b >= np.log(0.35)),\n      t * tf.math.exp(u),\n      result)\n\n  # These are hand tuned constants to compute (p * Gamma(a + 1)) ** (1 / a)\n  # TODO(b/178793508): Change these bounds / computation to be dtype dependent.\n  # This is Equation 21.\n  u = tf.where((tf.math.exp(log_b) * q > 1e-8) & (q > 1e-5),\n               tf.math.pow(p * tf.math.exp(lgamma_a) * a,\n                           tf.math.reciprocal(a)),\n               # When (1 - p) * Gamma(a) or (1 - p) is small,\n               # we can Taylor expand Gamma(a + 1) ** 1 / a to get\n               # exp(-euler_gamma) for the zeroth order term.\n               # Also p ** 1 / a = exp(log(p) / a) = exp(log(1 - q) / a)\n               # ~= exp(-q / a) resulting in the following expression.\n               tf.math.exp((-q / a) - np.euler_gamma))\n\n  result = tf.where(\n      (log_b > np.log(0.6)) | ((log_b >= np.log(0.45)) & (a >= 0.3)),\n      u / (1. - (u / (a + 1.))),\n      result)\n\n  # The code below is for when a > 1.\n\n  sqrt_a = tf.math.sqrt(a)\n  s = _didonato_eq_thirty_two(p, q)\n  s_sq = tf.math.square(s)\n  s_cub = s_sq * s\n  s_fourth = tf.math.square(s_sq)\n  s_fifth = s_fourth * s\n\n  # This is the Cornish-Fisher 6 term expansion for x (by viewing igammainv as\n  # the quantile function for the Gamma distribution). This is equation (31).\n  w = a + s * sqrt_a + (s_sq - 1.) / 3.\n  w = w + (s_cub - 7. * s) / (36. * sqrt_a)\n  w = w - (3. * s_fourth + 7. * s_sq - 16.) / (810 * a)\n  w = w + (9. * s_fifth + 256. * s_cub - 433. * s) / (38880 * a * sqrt_a)\n\n  # The code below is for when a > 1. and p > 0.5.\n  d = tf.math.maximum(numpy_dtype(2.), a * (a - 1.))\n  result_a_large_p_large = tf.where(\n      log_b <= -d * np.log(10.),\n      _didonato_eq_twenty_five(a, -log_b),\n      _didonato_eq_twenty_three(\n          log_b, _didonato_eq_twenty_three(log_b, w, a), a))\n  result_a_large_p_large = tf.where(w < 3. * a, w, result_a_large_p_large)\n  # TODO(b/178793508): Change these bounds / computation to be dtype dependent.\n  result_a_large_p_large = tf.where(\n      (a >= 500.) & (tf.math.abs(1. - w / a) < 1e-6),\n      w, result_a_large_p_large)\n\n  # The code below is for when a > 1. 
and p <= 0.5.\n z = w\n v = tf.math.log(p) + tf.math.lgamma(a + 1.)\n\n # The code below follows Equation 35 which involves multiple evaluations of\n # F_i.\n modified_z = tf.math.exp((v + w) / a)\n for _ in range(2):\n s = tf.math.log1p(\n modified_z / (a + 1.) * (\n 1. + modified_z / (a + 2.)))\n modified_z = tf.math.exp(\n (v + modified_z - s) / a)\n\n s = tf.math.log1p(\n modified_z / (a + 1.) * (1. + modified_z / (a + 2.) * (\n 1. + modified_z / (a + 3.))))\n modified_z = tf.math.exp((v + modified_z - s) / a)\n z = tf.where(w <= 0.15 * (a + 1.), modified_z, z)\n\n ls = tf.math.log(_didonato_eq_thirty_four(a, z))\n medium_z = tf.math.exp((v + z - ls) / a)\n result_a_large_p_small = tf.where(\n (z <= 0.01 * (a + 1.)) | (z > 0.7 * (a + 1.)),\n z,\n medium_z * (\n 1. - (\n a * tf.math.log(medium_z) - medium_z - v + ls) / (a - medium_z)))\n\n result_a_large = tf.where(\n p <= 0.5, result_a_large_p_small, result_a_large_p_large)\n result = tf.where(a < 1., result, result_a_large)\n\n # This ensures that computing log(1 - p) avoids roundoff errors. This is\n # needed since igammacinv and igammainv both use this codepath,\n # switching p and q.\n result = tf.where(tf.math.equal(a, 1.), -log_q, result)\n return result\n\n\ndef _shared_igammainv_computation(a, p, is_igammainv=True):\n \"\"\"Shared computation for the igammainv/igammacinv.\"\"\"\n\n dtype = dtype_util.common_dtype([a, p], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n\n if is_igammainv:\n q = 1. - p\n else:\n q = p\n p = 1. - q\n\n x = _inverse_igamma_initial_approx(a, p, q, use_p_for_logq=is_igammainv)\n\n # Run 3 steps of Newton-Halley method.\n for _ in range(3):\n factorial = tf.math.exp(a * tf.math.log(x) - x - tf.math.lgamma(a))\n\n f_over_der = tf.where(\n ((p <= 0.9) & is_igammainv) | ((q > 0.9) & (not is_igammainv)),\n (tf.math.igamma(a, x) - p) * x / factorial,\n -(tf.math.igammac(a, x) - q) * x / factorial)\n second_der_over_der = -1. + (a - 1.) / x\n modified_x = tf.where(\n tf.math.is_inf(second_der_over_der),\n # Use Newton's method if the second derivative is not available.\n x - f_over_der,\n # Use Halley's method otherwise. Halley's method is:\n # x_{n+1} = x_n - f(x_n) / f'(x_n) * (\n # 1 - f(x_n) / f'(x_n) * 0.5 f''(x_n) / f'(x_n))\n x - f_over_der / (1. - 0.5 * f_over_der * second_der_over_der))\n x = tf.where(tf.math.equal(factorial, 0.), x, modified_x)\n x = tf.where((a < 0.) | (p < 0.) | (p > 1.), numpy_dtype(np.nan), x)\n x = tf.where(tf.math.equal(p, 0.), numpy_dtype(0.), x)\n x = tf.where(tf.math.equal(p, 1.), numpy_dtype(np.inf), x)\n\n return x\n\n\ndef _igammainv_fwd(a, p):\n \"\"\"Compute output, aux (collaborates with _igammainv_bwd).\"\"\"\n output = _shared_igammainv_computation(a, p, is_igammainv=True)\n return output, (a, p)\n\n\ndef _igammainv_partials(a, x):\n \"\"\"Compute partial derivatives of `igammainv(a, x)`.\"\"\"\n # Partials for igamma.\n\n # This function does not have gradients in TF, and thus using\n # `stop_gradient` does not change behavior in TF.\n # Ideally, it would be nice to throw an exception when taking gradients of\n # this function in JAX mode, but this is not possible at the moment with\n # `custom_jvp`. 
See https://github.com/google/jax/issues/5913 for details.\n # TODO(https://github.com/google/jax/issues/5913): remove stop_gradients.\n igamma_partial_a = tf.raw_ops.IgammaGradA(\n a=tf.stop_gradient(a), x=tf.stop_gradient(x))\n igamma_partial_x = tf.math.exp(\n -x + tf.math.xlogy(a - 1., x) - tf.math.lgamma(a))\n\n # Use the fact that igamma and igammainv are inverses of each other to compute\n # the gradients.\n igammainv_partial_a = -igamma_partial_a / igamma_partial_x\n igammainv_partial_x = tf.math.reciprocal(igamma_partial_x)\n return igammainv_partial_a, igammainv_partial_x\n\n\ndef _igammainv_bwd(aux, g):\n \"\"\"Reverse mode impl for igammainv.\"\"\"\n a, p = aux\n x = _igammainv_custom_gradient(a, p)\n # Use the fact that igamma and igammainv are inverses to compute the gradient.\n pa, pp = _igammainv_partials(a, x)\n return _fix_gradient_for_broadcasting(a, p, pa * g, pp * g)\n\n\ndef _igammainv_jvp(primals, tangents):\n \"\"\"Computes JVP for igammainv (supports JAX custom derivative).\"\"\"\n a, p = primals\n da, dp = tangents\n # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?\n bc_shp = prefer_static.broadcast_shape(prefer_static.shape(da),\n prefer_static.shape(dp))\n da = tf.broadcast_to(da, bc_shp)\n dp = tf.broadcast_to(dp, bc_shp)\n\n x = _igammainv_custom_gradient(a, p)\n pa, pp = _igammainv_partials(a, x)\n\n return x, pa * da + pp * dp\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_igammainv_fwd,\n vjp_bwd=_igammainv_bwd,\n jvp_fn=_igammainv_jvp)\ndef _igammainv_custom_gradient(a, p):\n return _shared_igammainv_computation(a, p, is_igammainv=True)\n\n\ndef igammainv(a, p, name=None):\n \"\"\"Computes the inverse to `tf.math.igamma` with respect to `p`.\n\n This function is defined as the solution `x` to the equation\n `p = tf.math.igamma(a, x)`.\n\n # References\n [1] A. Didonato and A. Morris,\n Computation of the Incomplete Gamma Function Ratios and their Inverse\n https://dl.acm.org/doi/10.1145/22721.23109\n\n Args:\n a: A positive `float` `Tensor`. Must be broadcastable with `p`.\n p: A `float` `Tensor` whose entries lie in `[0, 1]`.\n Must be broadcastable with `a`.\n name: Optional Python `str` naming the operation.\n\n Returns:\n igammainv: igammainv(a, p). 
Has same type as `a`.\n \"\"\"\n with tf.name_scope(name or 'igammainv'):\n dtype = dtype_util.common_dtype([a, p], tf.float32)\n a = tf.convert_to_tensor(a, dtype=dtype)\n p = tf.convert_to_tensor(p, dtype=dtype)\n return _igammainv_custom_gradient(a, p)\n\n\ndef _igammacinv_fwd(a, p):\n \"\"\"Compute output, aux (collaborates with _igammacinv_bwd).\"\"\"\n output = _shared_igammainv_computation(a, p, is_igammainv=False)\n return output, (a, p)\n\n\ndef _igammacinv_bwd(aux, g):\n \"\"\"Reverse mode impl for igammacinv.\"\"\"\n a, p = aux\n x = _igammacinv_custom_gradient(a, p)\n pa, pp = _igammainv_partials(a, x)\n pp = -pp\n return _fix_gradient_for_broadcasting(a, p, pa * g, pp * g)\n\n\ndef _igammacinv_jvp(primals, tangents):\n \"\"\"Computes JVP for igammacinv (supports JAX custom derivative).\"\"\"\n a, p = primals\n da, dp = tangents\n # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?\n bc_shp = prefer_static.broadcast_shape(prefer_static.shape(da),\n prefer_static.shape(dp))\n da = tf.broadcast_to(da, bc_shp)\n dp = tf.broadcast_to(dp, bc_shp)\n\n x = _igammacinv_custom_gradient(a, p)\n pa, pp = _igammainv_partials(a, x)\n pp = -pp\n\n return x, pa * da + pp * dp\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_igammacinv_fwd,\n vjp_bwd=_igammacinv_bwd,\n jvp_fn=_igammacinv_jvp)\ndef _igammacinv_custom_gradient(a, p):\n return _shared_igammainv_computation(a, p, is_igammainv=False)\n\n\ndef igammacinv(a, p, name=None):\n \"\"\"Computes the inverse to `tf.math.igammac` with respect to `p`.\n\n This function is defined as the solution `x` to the equation\n `p = tf.math.igammac(a, x)`.\n\n # References\n [1] A. Didonato and A. Morris,\n Computation of the Incomplete Gamma Function Ratios and their Inverse\n https://dl.acm.org/doi/10.1145/22721.23109\n\n Args:\n a: A positive `float` `Tensor`. Must be broadcastable with `p`.\n p: A `float` `Tensor` whose entries lie in `[0, 1]`.\n Must be broadcastable with `a`.\n name: Optional Python `str` naming the operation.\n\n Returns:\n igammacinv: igammacinv(a, p). Has same type as `a`.\n \"\"\"\n\n with tf.name_scope(name or 'igammacinv'):\n dtype = dtype_util.common_dtype([a, p], tf.float32)\n a = tf.convert_to_tensor(a, dtype=dtype)\n p = tf.convert_to_tensor(p, dtype=dtype)\n return _igammacinv_custom_gradient(a, p)\n\n\ndef round_exponential_bump_function(x, name=None):\n r\"\"\"Function supported on [-1, 1], smooth on the real line, with a round top.\n\n Define\n\n ```\n f(x) := exp(-1 / (1 - x**2)) * exp(1), for x in (-1, 1)\n f(x) := 0, for |x| >= 1.\n ```\n\n One can show that f(x)...\n\n * is C^\\infty on the real line.\n * is supported on [-1, 1].\n * is equal to 1 at x = 0.\n * is strictly increasing on (-1, 0).\n * is strictly decreasing on (0, 1).\n * has gradient = 0 at 0.\n\n See [Bump Function](https://en.wikipedia.org/wiki/Bump_function)\n\n Args:\n x: Floating-point Tensor.\n name: Optional Python `str` naming the operation.\n\n Returns:\n y: Tensor of same shape and dtype as `x`.\n \"\"\"\n with tf.name_scope(name or 'round_exponential_bump_function'):\n x = tf.convert_to_tensor(x, name='x')\n one_m_x2 = 1 - x**2\n y = tf.math.exp(1. - tf.math.reciprocal_no_nan(one_m_x2))\n return tf.where(one_m_x2 > 0., y, 0.)\n\n\ndef lambertw_winitzki_approx(z, name=None):\n \"\"\"Computes Winitzki approximation to Lambert W function at z >= -1/exp(1).\n\n The approximation for z >= -1/exp(1) will be used as a starting point in the\n iterative algorithm to compute W(z). 
See _lambertw_principal_branch() below.\n See\n https://www.researchgate.net/post/Is_there_approximation_to_the_LambertWx_function\n and in particular (38) in\n https://pdfs.semanticscholar.org/e934/24f33e2742016ef18c36a80788400d2f17b4.pdf\n\n Args:\n z: value for which W(z) should be computed. Expected z >= -1/exp(1). If not\n then function will fail due to log(<0).\n name: optionally pass name for output.\n\n Returns:\n lambertw_winitzki_approx: Approximation for W(z) for z >= -1/exp(1).\n \"\"\"\n with tf.name_scope(name or 'lambertw_winitzki_approx'):\n z = tf.convert_to_tensor(z)\n # See eq (38) here:\n # https://pdfs.semanticscholar.org/e934/24f33e2742016ef18c36a80788400d2f17b4.pdf\n # or (10) here:\n # https://hal.archives-ouvertes.fr/hal-01586546/document\n log1pz = tf.math.log1p(z)\n return log1pz * (1. - tf.math.log1p(log1pz) / (2. + log1pz))\n\n\ndef _fritsch_iteration(unused_should_stop, z, w, tol):\n \"\"\"Root finding iteration for W(z) using Fritsch iteration.\"\"\"\n # See Section 2.3 in https://arxiv.org/pdf/1209.0735.pdf\n # Approximate W(z) by viewing iterative algorithm as multiplicative factor\n #\n # W(n+1) = W(n) * (1 + error)\n #\n # where error can be expressed as a function of z and W(n). See paper for\n # details.\n z = tf.convert_to_tensor(z)\n w = tf.convert_to_tensor(w)\n zn = tf.math.log(tf.abs(z)) - tf.math.log(tf.abs(w)) - w\n wp1 = w + 1.0\n q = 2. * wp1 * (wp1 + 2. / 3. * zn)\n q_minus_2zn = q - 2. * zn\n error = zn / wp1 * (1. + zn / q_minus_2zn)\n # Check absolute tolerance (not relative). Here the iteration error is\n # for relative tolerance, as W(n+1) = W(n) * (1 + error). Use\n # W(n+1) - W(n) = W(n) * error to get absolute tolerance.\n converged = abs(error * w) <= tol\n should_stop_next = tf.reduce_all(converged)\n return should_stop_next, w * (1. + error), z, tol\n\n\ndef _halley_iteration(unused_should_stop, w, z, tol, iteration_count):\n \"\"\"Halley's method on root finding of w for the equation w * exp(w) = z.\"\"\"\n w = tf.convert_to_tensor(w)\n z = tf.convert_to_tensor(z)\n f = w - z * tf.math.exp(-w)\n delta = f / (w + 1. - 0.5 * (w + 2.) * f / (w + 1.))\n w_next = w - delta\n converged = tf.math.abs(delta) <= tol * tf.math.abs(w_next)\n # We bound the number of iterations to be at most a 100.\n\n # When x is close to the branch point, the derivatives tend to very large\n # values, which causes the iteration to be slow. For x <= 0., 100 iterations\n # seems to be enough to guarantee a relative error of at most 1e-6.\n\n # The Winitzki approximation has a relative error of at most\n # 0.01. When x >= 0., the first through third derivatives are bounded such\n # that coupled with the initial approximation, we are in the realm of cubic\n # convergence.\n should_stop_next = tf.reduce_all(converged) | (iteration_count >= 100)\n return should_stop_next, w_next, z, tol, iteration_count + 1\n\n\ndef _lambertw_principal_branch(z, name=None):\n \"\"\"Computes Lambert W of `z` element-wise at the principal (k = 0) branch.\n\n The Lambert W function is the inverse of `z = y * tf.exp(y)` and is a\n many-valued function. 
Here `y = W_0(z)`, where `W_0` is the Lambert W function\n  evaluated at the 0-th branch (aka principal branch).\n\n  Args:\n    z: A Tensor with type `float32` or `float64`.\n    name: A name for the operation (optional).\n      Default value: `None` (i.e., 'lambertw_principal_branch').\n\n  Returns:\n    lambertw_principal_branch: A Tensor with same shape and same dtype as `z`.\n  \"\"\"\n  with tf.name_scope(name or 'lambertw_principal_branch'):\n    z = tf.convert_to_tensor(z)\n    np_finfo = np.finfo(dtype_util.as_numpy_dtype(z.dtype))\n    tolerance = tf.convert_to_tensor(2. * np_finfo.resolution, dtype=z.dtype)\n    # Start while loop with the initial value at the approximate Lambert W\n    # solution, instead of 'z' (for z > -1 / exp(1)). Using 'z' has bad\n    # convergence properties especially for large z (z > 5).\n    z0 = tf.where(z > -np.exp(-1.), lambertw_winitzki_approx(z), z)\n    z0 = tf.while_loop(cond=lambda stop, *_: ~stop,\n                       body=_halley_iteration,\n                       loop_vars=(False, z0, z, tolerance, 0))[1]\n    return tf.cast(z0, dtype=z.dtype)\n\n\ndef _lambert_fwd(z):\n  \"\"\"Compute output, aux (collaborates with _lambert_bwd).\"\"\"\n  wz = _lambertw_principal_branch(z)\n  return wz, (z,)\n\n\ndef _lambert_bwd(aux, g):\n  \"\"\"Reverse mode impl for lambert.\"\"\"\n  z, = aux\n  wz = _lambert_custom_gradient(z)\n  # At z = 0 the analytic expressions for the gradient results in a 0/0\n  # expression. However, the continuous expansion (l'Hospital rule) gives a\n  # derivative of 1.0 at z = 0. This case has to be handled separately with\n  # a where clause.\n  return g * tf.where(\n      tf.equal(z, 0.), tf.ones([], wz.dtype), wz / (z * (1. + wz)))\n\n\ndef _lambert_jvp(primals, tangents):\n  \"\"\"Computes JVP for lambert (supports JAX custom derivative).\"\"\"\n  z, = primals\n  dz, = tangents\n  wz = _lambert_custom_gradient(z)\n\n  # At z = 0 the analytic expressions for the gradient results in a 0/0\n  # expression. However, the continuous expansion (l'Hospital rule) gives a\n  # derivative of 1.0 at z = 0. This case has to be handled separately with\n  # a where clause.\n  pz = tf.where(tf.equal(z, 0.), tf.ones([], wz.dtype), wz / (z * (1. + wz)))\n  return wz, pz * dz\n\n\n@tfp_custom_gradient.custom_gradient(\n    vjp_fwd=_lambert_fwd,\n    vjp_bwd=_lambert_bwd,\n    jvp_fn=_lambert_jvp)\ndef _lambert_custom_gradient(z):\n  return _lambertw_principal_branch(z)\n\n\ndef lambertw(z, name=None):\n  \"\"\"Computes Lambert W of `z` element-wise.\n\n  The Lambert W function is the inverse of `z = u * exp(u)`, i.e., it is the\n  function that satisfies `z = W(z) * exp(W(z))`. The solution cannot be\n  expressed as a composition of elementary functions and is thus part of the\n  *special* functions in mathematics. See\n  https://en.wikipedia.org/wiki/Lambert_W_function.\n\n  In general it is a complex-valued function with multiple branches. The `k=0`\n  branch is known as the *principal branch* of the Lambert W function and is\n  implemented here. See also `scipy.special.lambertw`.\n\n  This code returns only the real part of the image of the Lambert W function.\n\n  # References\n\n  Corless, R.M., Gonnet, G.H., Hare, D.E.G. et al. On the LambertW function.\n  Adv Comput Math 5, 329-359 (1996) doi:10.1007/BF02124750\n\n  Args:\n    z: A Tensor with type `float32` or `float64`.\n    name: A name for the operation (optional).\n\n  Returns:\n    lambertw: The Lambert W function evaluated at `z`. 
A Tensor with same shape\n and same dtype as `z`.\n \"\"\"\n with tf.name_scope(name or 'lambertw'):\n z = tf.convert_to_tensor(z)\n return _lambert_custom_gradient(z)\n\n\ndef log_gamma_correction(x, name=None):\n \"\"\"Returns the error of the Stirling approximation to lgamma(x) for x >= 8.\n\n This is useful for accurately evaluating ratios between Gamma functions, as\n happens when trying to compute Beta functions.\n\n Specifically,\n ```\n lgamma(x) approx (x - 0.5) * log(x) - x + 0.5 log (2 pi)\n + log_gamma_correction(x)\n ```\n for x >= 8.\n\n This is the function called Delta in [1], eq (30). We implement it with\n the rational minimax approximation given in [1], eq (32).\n\n References:\n\n [1] DiDonato and Morris, \"Significant Digit Computation of the Incomplete Beta\n Function Ratios\", 1988. Technical report NSWC TR 88-365, Naval Surface\n Warfare Center (K33), Dahlgren, VA 22448-5000. Section IV, Auxiliary\n Functions. https://apps.dtic.mil/dtic/tr/fulltext/u2/a210118.pdf\n\n Args:\n x: Floating-point Tensor at which to evaluate the log gamma correction\n elementwise. The approximation is accurate when x >= 8.\n name: Optional Python `str` naming the operation.\n\n Returns:\n lgamma_corr: Tensor of elementwise log gamma corrections.\n \"\"\"\n with tf.name_scope(name or 'log_gamma_correction'):\n dtype = dtype_util.common_dtype([x], tf.float32)\n x = tf.convert_to_tensor(x, dtype=dtype)\n\n minimax_coeff = tf.constant([\n 0.833333333333333e-01,\n -0.277777777760991e-02,\n 0.793650666825390e-03,\n -0.595202931351870e-03,\n 0.837308034031215e-03,\n -0.165322962780713e-02,\n ], dtype=dtype)\n\n inverse_x = 1 / x\n inverse_x_squared = inverse_x * inverse_x\n accum = minimax_coeff[5]\n for i in reversed(range(5)):\n accum = accum * inverse_x_squared + minimax_coeff[i]\n return accum * inverse_x\n\n\ndef _fix_gradient_for_broadcasting(a, b, grad_a, grad_b):\n \"\"\"Reduces broadcast dimensions for a custom gradient.\"\"\"\n if (tensorshape_util.is_fully_defined(a.shape) and\n tensorshape_util.is_fully_defined(b.shape) and\n a.shape == b.shape):\n return [grad_a, grad_b]\n a_shape = tf.shape(a)\n b_shape = tf.shape(b)\n ra, rb = tf.raw_ops.BroadcastGradientArgs(s0=a_shape, s1=b_shape)\n grad_a = tf.reshape(tf.reduce_sum(grad_a, axis=ra), a_shape)\n grad_b = tf.reshape(tf.reduce_sum(grad_b, axis=rb), b_shape)\n return [grad_a, grad_b]\n\n\ndef _log_gamma_difference_big_y(x, y):\n \"\"\"Returns lgamma(y) - lgamma(x + y), accurately if 0 <= x <= y and y >= 8.\n\n This is more accurate than subtracting lgammas directly because lgamma grows\n as `x log(x) - x + o(x)`, and thus subtracting the value of lgamma for two\n close, large arguments incurs catastrophic cancellation.\n\n The method is to partition lgamma into the Stirling approximation and the\n correction `log_gamma_correction`, symbolically cancel the former, and compute\n and subtract the latter.\n\n Args:\n x: Floating-point Tensor. `x` should be non-negative, and elementwise no\n more than `y`.\n y: Floating-point Tensor. 
`y` should be elementwise no less than 8.\n\n Returns:\n lgamma_diff: Floating-point Tensor, the difference lgamma(y) - lgamma(x+y),\n computed elementwise.\n \"\"\"\n cancelled_stirling = (-1 * (x + y - 0.5) * tf.math.log1p(x / y)\n - x * tf.math.log(y) + x)\n correction = log_gamma_correction(y) - log_gamma_correction(x + y)\n return correction + cancelled_stirling\n\n\ndef _log_gamma_difference_naive_gradient(x, y):\n big_y = _log_gamma_difference_big_y(x, y)\n small_y = tf.math.lgamma(y) - tf.math.lgamma(x + y)\n return tf.where(y >= 8, big_y, small_y)\n\n\ndef _log_gamma_difference_fwd(x, y):\n \"\"\"Compute output, aux (collaborates with _log_gamma_difference_bwd).\"\"\"\n return _log_gamma_difference_naive_gradient(x, y), (x, y)\n\n\ndef _log_gamma_difference_bwd(aux, g):\n \"\"\"Reverse mode impl for log-gamma-diff.\"\"\"\n x, y = aux\n # Computing the gradient naively as the difference of digammas because\n # (i) digamma grows slower than gamma, so gets into bad cancellations\n # later, and (ii) doing better is work. This matches what the gradient\n # would be if the forward pass were computed naively as the difference\n # of lgammas.\n #\n # Note: This gradient assumes x and y are the same shape; this needs to\n # be arranged by pre-broadcasting before calling\n # `_log_gamma_difference`.\n px = -tf.math.digamma(x + y)\n py = tf.math.digamma(y) + px\n return _fix_gradient_for_broadcasting(x, y, px * g, py * g)\n\n\ndef _log_gamma_difference_jvp(primals, tangents):\n \"\"\"Computes JVP for log-gamma-difference (supports JAX custom derivative).\"\"\"\n x, y = primals\n dx, dy = tangents\n # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?\n bc_shp = prefer_static.broadcast_shape(prefer_static.shape(dx),\n prefer_static.shape(dy))\n dx = tf.broadcast_to(dx, bc_shp)\n dy = tf.broadcast_to(dy, bc_shp)\n # See note above in _log_gamma_difference_bwd.\n px = -tf.math.digamma(x + y)\n py = tf.math.digamma(y) + px\n return _log_gamma_difference_naive_gradient(x, y), px * dx + py * dy\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_log_gamma_difference_fwd,\n vjp_bwd=_log_gamma_difference_bwd,\n jvp_fn=_log_gamma_difference_jvp)\ndef _log_gamma_difference_custom_gradient(x, y):\n return _log_gamma_difference_naive_gradient(x, y)\n\n\ndef log_gamma_difference(x, y, name=None):\n \"\"\"Returns lgamma(y) - lgamma(x + y), accurately.\n\n This is more accurate than subtracting lgammas directly because lgamma grows\n as `x log(x) - x + o(x)`, and thus subtracting the value of lgamma for two\n close, large arguments incurs catastrophic cancellation.\n\n When `y >= 8`, the method is to partition lgamma into the Stirling\n approximation and the correction `log_gamma_correction`, symbolically cancel\n the former, and compute and subtract the latter.\n\n Args:\n x: Floating-point Tensor. `x` should be non-negative, and elementwise no\n more than `y`.\n y: Floating-point Tensor. `y` should be positive.\n name: Optional Python `str` naming the operation.\n\n Returns:\n lgamma_diff: Floating-point Tensor, the difference lgamma(y) - lgamma(x+y),\n computed elementwise.\n \"\"\"\n with tf.name_scope(name or 'log_gamma_difference'):\n dtype = dtype_util.common_dtype([x, y], tf.float32)\n x = tf.convert_to_tensor(x, dtype=dtype)\n y = tf.convert_to_tensor(y, dtype=dtype)\n return _log_gamma_difference_custom_gradient(x, y)\n\n\ndef _lbeta_naive_gradient(x, y):\n \"\"\"Computes log(Beta(x, y)) with autodiff gradients only.\"\"\"\n # Flip args if needed so y >= x. 
Beta is mathematically symmetric but our\n # method for computing it is not.\n x, y = tf.minimum(x, y), tf.maximum(x, y)\n\n log2pi = tf.constant(np.log(2 * np.pi), dtype=x.dtype)\n # Two large arguments case: y >= x >= 8.\n log_beta_two_large = (0.5 * log2pi\n - 0.5 * tf.math.log(y)\n + log_gamma_correction(x)\n + log_gamma_correction(y)\n - log_gamma_correction(x + y)\n + (x - 0.5) * tf.math.log(x / (x + y))\n - y * tf.math.log1p(x / y))\n\n # One large argument case: x < 8, y >= 8.\n log_beta_one_large = tf.math.lgamma(x) + _log_gamma_difference_big_y(x, y)\n\n # Small arguments case: x <= y < 8.\n log_beta_small = tf.math.lgamma(x) + tf.math.lgamma(y) - tf.math.lgamma(x + y)\n\n # Reference [1] has two more arms, for cases where x or y falls into the\n # interval (2, 8). In these cases, reference [1] recommends iteratively\n # reducing the arguments using the identity\n # B(x, y) = B(x - 1, y) * (x - 1) / (x + y - 1)\n # so they fall in the interval [1, 2]. We choose not to do that here to avoid\n # a TensorFlow while loop, and hope that subtracting lgammas will be accurate\n # enough for the user's purposes.\n\n return tf.where(x >= 8,\n log_beta_two_large,\n tf.where(y >= 8,\n log_beta_one_large,\n log_beta_small))\n\n\ndef _lbeta_fwd(x, y):\n \"\"\"Compute output, aux (collaborates with _lbeta_bwd).\"\"\"\n return _lbeta_naive_gradient(x, y), (x, y)\n\n\ndef _lbeta_bwd(aux, g):\n x, y = aux\n total_digamma = tf.math.digamma(x + y)\n px = tf.math.digamma(x) - total_digamma\n py = tf.math.digamma(y) - total_digamma\n return _fix_gradient_for_broadcasting(x, y, px * g, py * g)\n\n\ndef _lbeta_jvp(primals, tangents):\n \"\"\"Computes JVP for log-beta (supports JAX custom derivative).\"\"\"\n x, y = primals\n dx, dy = tangents\n # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?\n bc_shp = prefer_static.broadcast_shape(prefer_static.shape(dx),\n prefer_static.shape(dy))\n dx = tf.broadcast_to(dx, bc_shp)\n dy = tf.broadcast_to(dy, bc_shp)\n total_digamma = tf.math.digamma(x + y)\n px = tf.math.digamma(x) - total_digamma\n py = tf.math.digamma(y) - total_digamma\n return _lbeta_naive_gradient(x, y), px * dx + py * dy\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_lbeta_fwd,\n vjp_bwd=_lbeta_bwd,\n jvp_fn=_lbeta_jvp)\ndef _lbeta_custom_gradient(x, y):\n \"\"\"Computes log(Beta(x, y)) with correct custom gradient.\"\"\"\n return _lbeta_naive_gradient(x, y)\n\n\[email protected](autograph=False)\ndef lbeta(x, y, name=None):\n \"\"\"Returns log(Beta(x, y)).\n\n This is semantically equal to\n lgamma(x) + lgamma(y) - lgamma(x + y)\n but the method is more accurate for arguments above 8.\n\n The reason for accuracy loss in the naive computation is catastrophic\n cancellation between the lgammas. This method avoids the numeric cancellation\n by explicitly decomposing lgamma into the Stirling approximation and an\n explicit `log_gamma_correction`, and cancelling the large terms from the\n Stirling analytically.\n\n The computed gradients are the same as for the naive forward computation,\n because (i) digamma grows much slower than lgamma, so cancellations aren't as\n bad, and (ii) it's simpler and faster than trying to be more accurate.\n\n References:\n\n [1] DiDonato and Morris, \"Significant Digit Computation of the Incomplete Beta\n Function Ratios\", 1988. Technical report NSWC TR 88-365, Naval Surface\n Warfare Center (K33), Dahlgren, VA 22448-5000. Section IV, Auxiliary\n Functions. 
https://apps.dtic.mil/dtic/tr/fulltext/u2/a210118.pdf\n\n Args:\n x: Floating-point Tensor.\n y: Floating-point Tensor.\n name: Optional Python `str` naming the operation.\n\n Returns:\n lbeta: Tensor of elementwise log beta(x, y).\n \"\"\"\n with tf.name_scope(name or 'tfp_lbeta'):\n dtype = dtype_util.common_dtype([x, y], tf.float32)\n x = tf.convert_to_tensor(x, dtype=dtype)\n y = tf.convert_to_tensor(y, dtype=dtype)\n return _lbeta_custom_gradient(x, y)\n\n\n# The Owen's T implementation below is based on\n# [1] Patefield M., Tandy D., Fast and Accurate Calculation of Owen's T-Function\n# Journal of Statistical Software http://www.jstatsoft.org/v05/i05/paper\n\n\ndef _owens_t_method1(h, a, m):\n \"\"\"OwensT Method T1 using series expansions.\"\"\"\n # Method T1, which is evaluation of a particular series expansion of OwensT.\n\n dtype = dtype_util.common_dtype([h, a], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n\n neg_half_h_squared = -0.5 * tf.math.square(h)\n a_squared = tf.math.square(a)\n\n def series_evaluation(\n should_stop,\n index,\n ai,\n di,\n gi,\n series_sum):\n\n new_ai = a_squared * ai\n new_di = gi - di\n new_gi = neg_half_h_squared / index * gi\n new_series_sum = tf.where(\n should_stop, series_sum,\n series_sum + new_di * new_ai / (2. * index - 1.))\n should_stop = index >= m\n return should_stop, index + 1., new_ai, new_di, new_gi, new_series_sum\n\n initial_ai = a / numpy_dtype(2 * np.pi)\n initial_di = tf.math.expm1(neg_half_h_squared)\n initial_gi = neg_half_h_squared * tf.math.exp(neg_half_h_squared)\n initial_sum = (\n tf.math.atan(a) / numpy_dtype(2 * np.pi) + initial_ai * initial_di)\n\n (_, _, _, _, _, series_sum) = tf.while_loop(\n cond=lambda stop, *_: tf.reduce_any(~stop),\n body=series_evaluation,\n loop_vars=(\n # Use constant-tensor multiplication rather than static or dynamic\n # shape broadcasting logic, since the former will be robust to\n # partially-static shapes.\n tf.cast(\n tf.zeros_like(h) * tf.zeros_like(a),\n dtype=tf.bool),\n tf.cast(2., dtype=dtype),\n initial_ai,\n initial_di,\n initial_gi,\n initial_sum))\n return series_sum\n\n\ndef _owens_t_method2(h, a, m):\n \"\"\"OwensT Method T2 using power series.\"\"\"\n # Method T2, which approximates the (1 + x^2)^-1 term in the\n # denominator of the OwensT integrand via a power series, and integrates\n # this term by term to get a series expansion.\n dtype = dtype_util.common_dtype([h, a], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n h_squared = tf.math.square(h)\n nega_squared = -tf.math.square(a)\n num_iterations = 2 * m + 1.\n y = tf.math.reciprocal(h_squared)\n\n def series_evaluation(\n should_stop,\n index,\n summand,\n term,\n series_sum):\n new_summand = y * (term - index * summand)\n new_term = nega_squared * term\n new_series_sum = tf.where(should_stop, series_sum, series_sum + new_summand)\n should_stop = index >= num_iterations\n return should_stop, index + 2., new_summand, new_term, new_series_sum\n\n initial_summand = -0.5 * tf.math.erf(a * h) / h\n initial_sum = initial_summand\n initial_term = a * tf.math.exp(\n -0.5 * tf.math.square(a * h)) / numpy_dtype(np.sqrt(2 * np.pi))\n\n (_, _, _, _, series_sum) = tf.while_loop(\n cond=lambda stop, *_: tf.reduce_any(~stop),\n body=series_evaluation,\n loop_vars=(\n # Use constant-tensor multiplication rather than static or dynamic\n # shape broadcasting logic, since the former will be robust to\n # partially-static shapes.\n tf.cast(\n tf.zeros_like(h) * tf.zeros_like(a),\n
dtype=tf.bool),\n tf.cast(1., dtype=dtype),\n initial_summand,\n initial_term,\n initial_sum))\n return (series_sum * tf.math.exp(-0.5 * h_squared) /\n numpy_dtype(np.sqrt(2 * np.pi)))\n\n\ndef _owens_t_method3(h, a):\n \"\"\"OwensT Method T3, using Chebyshev series.\"\"\"\n # Method T3, which is evaluation approximating the (1 + x^2)^-1 term in the\n # denominator of the OwensT integrand via chebyshev series, and integrating\n # this term by term to get a series expansion.\n coefficients = np.array([\n 0.99999999999999999999999729978162447266851932041876728736094298092,\n -0.9999999999999999999946705637967839181062653325188532341679987487,\n 0.99999999999999999824849349313270659391127814689133077036298754586,\n -0.9999999999999997703859616213643405880166422891953033591551179153,\n 0.99999999999998394883415238173334565554173013941245103172035286759,\n -0.9999999999993063616095509371081203145247992197457263066869044528,\n 0.99999999997973363404094644295992298705901604112382452758559037676,\n -0.9999999995749584120690466801190516397534123780375655213594441702,\n 0.99999999332262341933753249439201609471582390767861031080974566177,\n -0.9999999188923242461073033481053037468263536806742737922476636768,\n 0.99999921951434836744028537835494208830551296800829326291600811289,\n -0.9999939351372067128309979219133169714722271997418573865750972505,\n 0.99996135597690552745362392866517133091672395614263398912807169603,\n -0.9997955636651394602640678896963029382098775775864121129307978458,\n 0.99909278962961710015348625142385059005136666194734431542322608252,\n -0.9965938374119182021193086204326146003381573358628885806714509388,\n 0.98910017138386127038463510314625339359073956513420458166238478926,\n -0.9700785580406933145213319822037627715121601685824945133478464073,\n 0.92911438683263187495758525500033707204091967947532160289872782771,\n -0.8542058695956156057286980736842905011429254735181323743367879525,\n 0.73796526033030091233118357742803709382964420335559408722681794195,\n -0.5852346988283739457012859900378515414416468058761587864517163279,\n 0.41599777614567630616566166358186846050387420534301419658012217494,\n -0.2588210875241943574388730510317252236407805082485246378222935376,\n 0.13755358251638926485046469515002655850557890194106175657270903465,\n -0.0607952766325955730493900985022020434830339794955745989150270485,\n 0.02163376832998715280598364838403905142754886795307972945570602292,\n -0.0059340569345518672987699581418120390055001422042884348392721826,\n 0.00117434148183329465104745761827392105533338601068118659634858706,\n -1.4891556133503689340734532606898813301663424844055299815106940E-4,\n 9.07235432079435758771092950798881466945428151426884488484154734E-6])\n\n a_squared = tf.math.square(a)\n h_squared = tf.math.square(h)\n y = tf.math.reciprocal(h_squared)\n vi = a * tf.math.exp(-0.5 * tf.math.square(a * h)) / np.sqrt(2 * np.pi)\n zi = 0.5 * tf.math.erf(a * h / np.sqrt(2.)) / h\n result = 0.\n\n for i in range(31):\n result = result + zi * coefficients[i]\n zi = y * ((2 * i + 1.) * zi - vi)\n vi = a_squared * vi\n return result * tf.math.exp(-0.5 * h_squared) / np.sqrt(2 * np.pi)\n\n\ndef _owens_t_method4(h, a, m):\n \"\"\"OwensT Method T4, which is a reordered evaluation of method T2.\"\"\"\n dtype = dtype_util.common_dtype([h, a], tf.float32)\n h_squared = tf.math.square(h)\n nega_squared = -tf.math.square(a)\n num_iterations = 2 * m + 1.\n\n def series_evaluation(\n should_stop,\n index,\n term,\n coeff,\n series_sum):\n new_coeff = (1. 
- h_squared * coeff) / index\n new_term = nega_squared * term\n new_series_sum = tf.where(\n should_stop, series_sum, series_sum + new_coeff * new_term)\n should_stop = index >= num_iterations\n return should_stop, index + 2., new_term, new_coeff, new_series_sum\n\n initial_term = a * tf.math.exp(\n -0.5 * h_squared * (1 - nega_squared)) / (2 * np.pi)\n initial_sum = initial_term\n\n (_, _, _, _, series_sum) = tf.while_loop(\n cond=lambda stop, *_: tf.reduce_any(~stop),\n body=series_evaluation,\n loop_vars=(\n tf.cast(\n tf.zeros_like(h) * tf.zeros_like(a),\n dtype=tf.bool),\n tf.cast(3., dtype=dtype),\n initial_term,\n tf.ones_like(h) * tf.ones_like(a),\n initial_sum))\n return series_sum\n\n\ndef _owens_t_method5(h, a):\n \"\"\"OwensT Method T5 which uses Gaussian Quadrature.\"\"\"\n # Method T5, which is a gaussian quadrature approximation of the integral.\n\n # These are shifted and squared.\n quadrature_points = np.array([\n 0.35082039676451715489E-02, 0.31279042338030753740E-01,\n 0.85266826283219451090E-01, 0.16245071730812277011E+00,\n 0.25851196049125434828E+00, 0.36807553840697533536E+00,\n 0.48501092905604697475E+00, 0.60277514152618576821E+00,\n 0.71477884217753226516E+00, 0.81475510988760098605E+00,\n 0.89711029755948965867E+00, 0.95723808085944261843E+00,\n 0.99178832974629703586E+00])\n quadrature_weights = np.array([\n 0.18831438115323502887E-01, 0.18567086243977649478E-01,\n 0.18042093461223385584E-01, 0.17263829606398753364E-01,\n 0.16243219975989856730E-01, 0.14994592034116704829E-01,\n 0.13535474469662088392E-01, 0.11886351605820165233E-01,\n 0.10070377242777431897E-01, 0.81130545742299586629E-02,\n 0.60419009528470238773E-02, 0.38862217010742057883E-02,\n 0.16793031084546090448E-02])\n r = tf.math.square(a[..., tf.newaxis]) * quadrature_points\n log_integrand = -0.5 * tf.math.square(\n h[..., tf.newaxis]) * (1. + r) - tf.math.log1p(r)\n return tf.math.exp(tf.math.log(a) + tf.math.reduce_logsumexp(\n log_integrand + np.log(quadrature_weights), axis=-1))\n\n\ndef _owens_t_method6(h, a):\n # Method T6, which is a special case for when a is near 1.\n r = tf.math.atan2(1. - a, 1. + a)\n # When a = 1, T(h, 1) = 0.5 * ndtr(h) * (1 - ndtr(h)).\n # Thus, when a is close to 1, we add a correction term.\n normh = 0.5 * tf.math.erfc(h / np.sqrt(2.))\n result = 0.5 * normh * (1 - normh)\n return tf.where(\n tf.math.equal(r, 0.),\n result,\n result - r * tf.math.exp(\n -(1. 
- a) * tf.math.square(h) / (2 * r)) / (2 * np.pi))\n\n\ndef _owens_t_regions(h, a):\n \"\"\"Returns a list of Tensors describing the region of computation.\"\"\"\n # We assume h >= 0, 0 <= a <= 1\n # Regions 1-7 that use T1.\n regions = []\n\n is_in_region1 = (h <= 0.06) & (a <= 0.025)\n is_in_region1 = is_in_region1 | (h <= 0.02) & (a <= 0.09)\n regions.append(is_in_region1)\n\n is_in_region2 = (h <= 0.02) & (a >= 0.09)\n is_in_region2 = (is_in_region2 |\n (h >= 0.02) & (h <= 0.06) & (a >= 0.025) & (a <= 0.36))\n is_in_region2 = is_in_region2 | (h >= 0.06) & (h <= 0.09) & (a <= 0.09)\n regions.append(is_in_region2)\n\n is_in_region3 = (h >= 0.02) & (h <= 0.06) & (a >= 0.36)\n is_in_region3 = (is_in_region3 |\n (h >= 0.06) & (h <= 0.09) & (a >= 0.09) & (a <= 0.5))\n is_in_region3 = (is_in_region3 |\n (h >= 0.09) & (h <= 0.26) & (a >= 0.025) & (a <= 0.15))\n regions.append(is_in_region3)\n\n is_in_region4 = (h >= 0.06) & (h <= 0.125) & (a >= 0.9)\n regions.append(is_in_region4)\n\n is_in_region5 = (h >= 0.06) & (h <= 0.26) & (a >= 0.5) & (a <= 0.9)\n is_in_region5 = (is_in_region5 |\n (h >= 0.09) & (h <= 0.26) & (a >= 0.15) & (a <= 0.5))\n is_in_region5 = (is_in_region5 |\n (h >= 0.26) & (h <= 0.6) & (a >= 0.025) & (a <= 0.36))\n regions.append(is_in_region5)\n\n is_in_region6 = (h >= 0.26) & (h <= 0.6) & (a >= 0.36) & (a <= 0.9)\n is_in_region6 = is_in_region6 | (h >= 0.125) & (h <= 0.4) & (a >= 0.9)\n regions.append(is_in_region6)\n\n is_in_region7 = (h >= 0.6) & (h <= 1.7) & (a >= 0.15) & (a <= 0.36)\n regions.append(is_in_region7)\n\n is_in_region8 = (h >= 0.6) & (h <= 1.7) & (a >= 0.36) & (a <= 0.9)\n is_in_region8 = (is_in_region8 |\n (h >= 0.4) & (h <= 1.6) & (a >= 0.9) & (a <= 0.99999))\n regions.append(is_in_region8)\n\n is_in_region9 = (h >= 4.8) & (a <= 0.09)\n regions.append(is_in_region9)\n\n is_in_region10 = (h >= 4.8) & (a >= 0.09) & (a <= 0.36)\n regions.append(is_in_region10)\n\n is_in_region11 = (h >= 4.8) & (a >= 0.36) & (a <= 0.5)\n regions.append(is_in_region11)\n\n is_in_region12 = (h >= 3.4) & (a >= 0.9)\n is_in_region12 = is_in_region12 | (h >= 3.36) & (a >= 0.36) & (a <= 0.9)\n is_in_region12 = is_in_region12 & ~is_in_region11\n regions.append(is_in_region12)\n\n is_in_region13 = (h >= 0.09) & (h <= 2.4) & (a <= 0.025)\n regions.append(is_in_region13)\n\n is_in_region14 = (h >= 0.6) & (h <= 1.7) & (a >= 0.025) & (a <= 0.09)\n regions.append(is_in_region14)\n\n is_in_region15 = (h >= 0.6) & (h <= 2.4) & (a >= 0.025) & (a <= 0.15)\n is_in_region15 = is_in_region15 & ~is_in_region14\n regions.append(is_in_region15)\n\n is_in_region16 = (h >= 1.7) & (h <= 2.4) & (a >= 0.15) & (a <= 0.36)\n is_in_region16 = is_in_region16 | (h >= 2.4) & (h <= 4.8) & (a <= 0.36)\n regions.append(is_in_region16)\n\n is_in_region17 = (h >= 1.6) & (h <= 3.4) & (a >= 0.9) & (a <= 0.99999)\n is_in_region17 = (is_in_region17 |\n (h >= 1.7) & (h <= 3.4) & (a >= 0.36) & (a <= 0.9))\n regions.append(is_in_region17)\n\n # Near the line a = 1.\n is_in_region18 = (h >= 0.4) & (h <= 2.33) & (a >= 0.99999)\n regions.append(is_in_region18)\n\n return regions\n\n\ndef _owens_t_naive_gradient(h, a):\n \"\"\"Computes OwensT(h, a) with autodiff gradients only.\"\"\"\n dtype = dtype_util.common_dtype([h, a], tf.float32)\n numpy_dtype = dtype_util.as_numpy_dtype(dtype)\n\n # OwensT(-h, a) = OwensT(h, a)\n h = tf.math.abs(h)\n abs_a = tf.math.abs(a)\n\n # Remap arguments such that 0 <= a <= 1.\n modified_a = tf.where(\n abs_a <= 1.,\n abs_a,\n tf.math.reciprocal(abs_a))\n\n modified_h = tf.where(abs_a <= 1., 
h, abs_a * h)\n\n # For regions 1 - 8, we use method1 with different orders.\n\n regions = _owens_t_regions(modified_h, modified_a)\n\n # Short-circuit if we are not in the first 8 regions.\n order = numpy_dtype(1.)\n order = tf.where(regions[0], numpy_dtype(2.), order)\n order = tf.where(regions[1], numpy_dtype(3.), order)\n order = tf.where(regions[2], numpy_dtype(4.), order)\n order = tf.where(regions[3], numpy_dtype(5.), order)\n order = tf.where(regions[4], numpy_dtype(7.), order)\n order = tf.where(regions[5], numpy_dtype(10.), order)\n order = tf.where(regions[6], numpy_dtype(12.), order)\n order = tf.where(regions[7], numpy_dtype(18.), order)\n result = _owens_t_method1(modified_h, modified_a, order)\n\n # For regions 9, 10 and 11 we use method2 with different orders.\n order = numpy_dtype(1.)\n order = tf.where(regions[8], numpy_dtype(10.), order)\n order = tf.where(regions[9], numpy_dtype(20.), order)\n order = tf.where(regions[10], numpy_dtype(30.), order)\n result = tf.where(\n regions[8] | regions[9] | regions[10],\n _owens_t_method2(modified_h, modified_a, order),\n result)\n\n # For region 12 we use method3.\n result = tf.where(\n regions[11], _owens_t_method3(modified_h, modified_a), result)\n\n # For regions 13, 14, 15 and 16 we use method4 with different orders.\n order = numpy_dtype(1.)\n order = tf.where(regions[12], numpy_dtype(4.), order)\n order = tf.where(regions[13], numpy_dtype(7.), order)\n order = tf.where(regions[14], numpy_dtype(8.), order)\n order = tf.where(regions[15], numpy_dtype(20.), order)\n result = tf.where(\n regions[12] | regions[13] | regions[14] | regions[15],\n _owens_t_method4(modified_h, modified_a, order),\n result)\n\n # For region 17 we use method5.\n result = tf.where(\n regions[16], _owens_t_method5(modified_h, modified_a), result)\n\n # For region 18, we use method6.\n result = tf.where(\n regions[17], _owens_t_method6(modified_h, modified_a), result)\n\n result = tf.where(\n tf.math.equal(modified_h, 0.),\n tf.math.atan(modified_a) / (2 * np.pi), result)\n\n # When a = 1, OwensT(h, 1) = ndtr(h) * (1 - ndtr(h)) / 2\n result = tf.where(\n tf.math.equal(modified_a, 1.),\n (0.125 * tf.math.erfc(-modified_h / np.sqrt(2.)) *\n tf.math.erfc(modified_h / np.sqrt(2.))), result)\n\n # When a = 0, we should return 0.\n result = tf.where(tf.math.equal(modified_a, 0.), numpy_dtype(0.), result)\n\n normh = tf.math.erfc(h / np.sqrt(2.))\n normah = tf.math.erfc(abs_a * h / np.sqrt(2.))\n # Compensate for when |a| > 1.\n result = tf.where(\n abs_a > 1.,\n tf.where(\n abs_a * h <= 0.67,\n 0.25 - 0.25 * tf.math.erf(\n h / np.sqrt(2.)) * tf.math.erf(abs_a * h / np.sqrt(2.)) - result,\n 0.25 * (normh + normah - normh * normah) - result),\n result)\n\n result = tf.math.sign(a) * result\n\n result = tf.where(tf.math.is_nan(a) | tf.math.is_nan(h),\n numpy_dtype(np.nan),\n result)\n return result\n\n\ndef _owens_t_fwd(h, a):\n \"\"\"Compute output, aux (collaborates with _owens_t_bwd).\"\"\"\n return _owens_t_naive_gradient(h, a), (h, a)\n\n\ndef _owens_t_bwd(aux, g):\n h, a = aux\n ph = (-tf.math.exp(-0.5 * tf.math.square(h)) *\n tf.math.erf(a * h / np.sqrt(2)) / (2 * np.sqrt(2 * np.pi)))\n pa = (tf.math.exp(-0.5 * (tf.math.square(a) + 1) * tf.math.square(h)) /\n (2 * np.pi * (tf.math.square(a) + 1.)))\n return _fix_gradient_for_broadcasting(h, a, ph * g, pa * g)\n\n\ndef _owens_t_jvp(primals, tangents):\n \"\"\"Computes JVP for Owen's T (supports JAX custom derivative).\"\"\"\n h, a = primals\n dh, da = tangents\n # TODO(https://github.com/google/jax/issues/3768): 
eliminate broadcast_to?\n bc_shp = prefer_static.broadcast_shape(prefer_static.shape(dh),\n prefer_static.shape(da))\n dh = tf.broadcast_to(dh, bc_shp)\n da = tf.broadcast_to(da, bc_shp)\n ph = (-tf.math.exp(-0.5 * tf.math.square(h)) *\n tf.math.erf(a * h / np.sqrt(2)) / (2 * np.sqrt(2 * np.pi)))\n pa = (tf.math.exp(-0.5 * (tf.math.square(a) + 1.) * tf.math.square(h)) /\n (2 * np.pi * (tf.math.square(a) + 1.)))\n return _owens_t_naive_gradient(h, a), ph * dh + pa * da\n\n\n@tfp_custom_gradient.custom_gradient(\n vjp_fwd=_owens_t_fwd,\n vjp_bwd=_owens_t_bwd,\n jvp_fn=_owens_t_jvp)\ndef _owens_t_custom_gradient(h, a):\n \"\"\"Computes OwensT(h, a) with correct custom gradient.\"\"\"\n return _owens_t_naive_gradient(h, a)\n\n\ndef owens_t(h, a, name=None):\n # pylint: disable=line-too-long\n \"\"\"Computes Owen's T function of `h` and `a` element-wise.\n\n Owen's T function is defined as the combined probability of the event `X > h`\n and `0 < Y < a * X`, where `X` and `Y` are independent standard normal\n random variables.\n\n In integral form this is defined as `1 / (2 * pi)` times the integral of\n `exp(-0.5 * h ** 2 * (1 + x ** 2)) / (1 + x ** 2)` from `0` to `a`.\n `h` and `a` can be any real numbers.\n\n The Owen's T implementation below is based on\n ([Patefield and Tandy, 2000][1]).\n\n The Owen's T function has several notable properties which\n we list here for convenience. ([Owen, 1980][2], page 414)\n\n - P2.1 `T( h, 0) = 0`\n - P2.2 `T( 0, a) = arctan(a) / (2 pi)`\n - P2.3 `T( h, 1) = Phi(h) (1 - Phi(h)) / 2`\n - P2.4 `T( h, inf) = (1 - Phi(|h|)) / 2`\n - P2.5 `T(-h, a) = T(h, a)`\n - P2.6 `T( h,-a) = -T(h, a)`\n - P2.7 `T( h, a) + T(a h, 1 / a) = Phi(h)/2 + Phi(ah)/2 - Phi(h) Phi(ah) - [a<0]/2`\n - P2.8 `T( h, a) = arctan(a)/(2 pi) - 1/(2 pi) int_0^h int_0^{ax} exp(-(x**2 + y**2)/2) dy dx`\n - P2.9 `T( h, a) = arctan(a)/(2 pi) - int_0^h phi(x) Phi(a x) dx + Phi(h)/2 - 1/4`\n\n `[a<0]` uses Iverson bracket notation, i.e., `[a<0] = 1` if `a < 0` and `0` otherwise.\n\n Let us also define P2.10 as:\n - P2.10 `T(inf, a) = 0`\n - Proof\n\n Note that result #10,010.6 ([Owen, 1980][2], page 403) states that:\n `int_0^inf phi(x) Phi(a+bx) dx = Phi(a/rho)/2 + T(a/rho,b) where rho = sqrt(1+b**2).`\n Using `a=0`, this result is:\n `int_0^inf phi(x) Phi(bx) dx = 1/4 + T(0,b) = 1/4 + arctan(b) / (2 pi)`\n Combining this with P2.9 implies\n ```none\n T(inf, a)\n = arctan(a)/(2 pi) - [ 1/4 + arctan(a) / (2 pi)] + Phi(inf)/2 - 1/4\n = -1/4 + 1/2 - 1/4 = 0.\n ```\n QED\n\n Args:\n h: A `float` `Tensor` defined as in `P({X > h, 0 < Y < a X})`. Must be\n broadcastable with `a`.\n a: A `float` `Tensor` defined as in `P({X > h, 0 < Y < a X})`. Must be\n broadcastable with `h`.\n name: A name for the operation (optional).\n\n Returns:\n owens_t: A `Tensor` with the same type as `h` and `a`.\n\n #### References\n\n [1]: Patefield, Mike, and David Tandy. \"Fast and accurate calculation\n of Owen’s T function.\" Journal of Statistical Software 5.5 (2000): 1-25.\n http://www.jstatsoft.org/v05/i05/paper\n [2]: Owen, Donald Bruce. \"A table of normal integrals.\"\n Communications in Statistics-Simulation and Computation 9.4 (1980):\n 389-419.\n \"\"\"\n # pylint: enable=line-too-long\n with tf.name_scope(name or 'owens_t'):\n dtype = dtype_util.common_dtype([h, a], tf.float32)\n h = tf.convert_to_tensor(h, dtype=dtype, name='h')\n a = tf.convert_to_tensor(a, dtype=dtype, name='a')\n return _owens_t_custom_gradient(h, a)\n",
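"# --- Editor's note: a minimal usage sketch for the special functions above;\n# it is not part of the library source. It assumes the functions are exported\n# as tfp.math.lambertw, tfp.math.lbeta and tfp.math.owens_t, and it checks\n# only identities quoted in the docstrings (the defining equation of W, the\n# lgamma decomposition of log-Beta, and property P2.3 of Owen's T).\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\n# W(z) satisfies W(z) * exp(W(z)) = z on the principal branch.\nz = tf.constant([0.5, 1., 10.], dtype=tf.float64)\nw = tfp.math.lambertw(z)\nnp.testing.assert_allclose(w * tf.math.exp(w), z, rtol=1e-6)\n\n# lbeta agrees with the naive lgamma formula; the two differ only in accuracy\n# for large arguments.\nx = tf.constant([0.5, 4.], dtype=tf.float64)\ny = tf.constant([2.5, 12.], dtype=tf.float64)\nnaive = tf.math.lgamma(x) + tf.math.lgamma(y) - tf.math.lgamma(x + y)\nnp.testing.assert_allclose(tfp.math.lbeta(x, y), naive, rtol=1e-6)\n\n# Property P2.3: T(h, 1) = Phi(h) * (1 - Phi(h)) / 2, where\n# Phi(h) = 0.5 * erfc(-h / sqrt(2)) is the standard normal CDF.\nh = tf.constant([0., 1., 2.], dtype=tf.float64)\nphi_h = 0.5 * tf.math.erfc(-h / np.sqrt(2.))\nnp.testing.assert_allclose(\n tfp.math.owens_t(h, tf.ones_like(h)),\n phi_h * (1. - phi_h) / 2., rtol=1e-6)",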
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The differential evolution global optimization algorithm.\n\nDifferential evolution (DE) is a population-based global optimization scheme.\nIt is applicable to problems with a continuous parameter space. Because it\ndoes not require computing gradients, it is also applicable to\nnon-differentiable functions. For more details see:\nhttps://en.wikipedia.org/wiki/Differential_evolution\n\nDE starts with a population of candidate solutions (represented as vectors).\nIt generates new trial solutions by a combination of\n- \"mutation\", namely adding the weighted difference between\n two population vectors to a target vector, and\n- \"crossover\", namely mixing the target with the content of a third.\n\nIf the trial vector thus constructed yields a lower cost value it\nreplaces the target vector it was made from. This process is repeated for\neach member of the population to complete one generation.\n\nThere are a number of different schemes that fall under the DE umbrella.\nThe established notation for representing these schemes\nis as `DE/x/y/z` where `x` specifies how the population member to be mutated is\nselected (may be `rand` if it is chosen randomly or `best` if the best\nmember is chosen), `y` specifies the number of difference vectors used and `z`\ndenotes the crossover scheme employed. This may be `bin` if binary\nrecombination is used or `exp` if exponential crossover is used.\n\nThe most commonly employed scheme is `DE/rand/1/bin`. This is the one\nimplemented in this module.\n\"\"\"\n\n\nimport collections\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import distributions\nfrom tensorflow_probability.python import util as tfp_util\nfrom tensorflow_probability.python.internal import dtype_util\n\n\n_DifferentialEvolutionOptimizerResults = collections.namedtuple(\n '_DifferentialEvolutionOptimizerResults', [\n 'converged', # Scalar boolean tensor indicating whether the minimum\n # was found within tolerance.\n 'failed', # Scalar boolean tensor indicating whether the search failed.\n # This may happen if the objective values become NaN for\n # the entire population.\n 'position', # A list of tensors containing the best argument value\n # found during the search. If the search converged, then\n # this value is the argmin of the objective function.\n 'objective_value', # A tensor containing the value of the objective\n # function at the `position`. 
If the search\n # converged, then this is the (local) minimum of\n # the objective function.\n 'final_population', # The final state of the population.\n 'final_objective_values', # The objective function evaluated at the\n # final population.\n 'initial_population', # The starting population.\n 'initial_objective_values', # The objective function evaluated at the\n # initial population.\n 'num_iterations' # The number of generations the population was\n # evolved.\n ])\n\n\nclass DifferentialEvolutionOptimizerResults(\n _DifferentialEvolutionOptimizerResults):\n \"\"\"Results of a differential evolution optimization run.\n\n The object has the following attributes:\n converged: Scalar boolean `Tensor` indicating whether the minimum was\n found within the specified tolerances.\n failed: Scalar boolean tensor indicating whether the search failed.\n This may happen if the objective values become NaN for the entire\n population.\n position: A `Tensor` containing the best point found during the search.\n If the search converged, then this value is the argmin of the\n objective function within the specified tolerances.\n objective_value: A tensor containing the value of the objective\n function at the `position`. If the search\n converged, then this is the (local) minimum of\n the objective function.\n final_population: The final state of the population.\n final_objective_values: The objective function evaluated at the\n final population.\n initial_population: The starting population.\n initial_objective_values: The objective function evaluated at the\n initial population.\n num_iterations: The number of generations the population was evolved.\n \"\"\"\n\n\n# Class to keep track of the loop variables in the minimize method.\n_MinimizeLoopVars = collections.namedtuple(\n '_MinimizeLoopVars',\n [\n 'converged',\n 'failed',\n 'num_iterations',\n 'population',\n 'population_values'\n ])\n\n\ndef one_step(\n objective_function,\n population,\n population_values=None,\n differential_weight=0.5,\n crossover_prob=0.9,\n seed=None,\n name=None):\n \"\"\"Performs one step of the differential evolution algorithm.\n\n Args:\n objective_function: A Python callable that accepts a batch of possible\n solutions and returns the values of the objective function at those\n arguments as a rank 1 real `Tensor`. This specifies the function to be\n minimized. The input to this callable may be either a single `Tensor`\n or a Python `list` of `Tensor`s. The signature must match the format of\n the argument `population`. (i.e., objective_function(*population) must\n return the value of the function to be minimized).\n population: `Tensor` or Python `list` of `Tensor`s representing the\n current population vectors. Each `Tensor` must be of the same real dtype.\n The first dimension indexes individual population members while the\n rest of the dimensions are consumed by the value function. For example,\n if the population is a single `Tensor` of shape [n, m1, m2], then `n` is\n the population size and the output of `objective_function` applied to the\n population is a `Tensor` of shape [n]. If the population is a python\n list of `Tensor`s then each `Tensor` in the list should have the first\n axis of a common size, say `n` and `objective_function(*population)`\n should return a `Tensor` of shape [n]. The population must have at least\n 4 members for the algorithm to work correctly.\n population_values: A `Tensor` of rank 1 and real dtype. The result of\n applying `objective_function` to the `population`. 
If not supplied it is\n computed using the `objective_function`.\n Default value: None.\n differential_weight: Real scalar `Tensor`. Must be positive and less than\n 2.0. The parameter controlling the strength of mutation.\n Default value: 0.5\n crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The\n probability of recombination per site.\n Default value: 0.9\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: None.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'one_step' is\n used.\n Default value: None\n\n Returns:\n A sequence containing the following elements (in order):\n next_population: A `Tensor` or Python `list` of `Tensor`s of the same\n structure as the input population. The population at the next generation.\n next_population_values: A `Tensor` of same shape and dtype as input\n `population_values`. The function values for the `next_population`.\n \"\"\"\n with tf.name_scope(name or 'one_step'):\n population, _ = _ensure_list(population)\n if population_values is None:\n population_values = objective_function(*population)\n population_size = tf.shape(population[0])[0]\n seed_stream = tfp_util.SeedStream(seed, salt='one_step')\n mixing_indices = _get_mixing_indices(population_size, seed=seed_stream())\n # Construct the mutated solution vectors. There is one for each member of\n # the population.\n mutants = _get_mutants(population,\n population_size,\n mixing_indices,\n differential_weight)\n # Perform recombination between the parents and the mutants.\n candidates = _binary_crossover(population,\n population_size,\n mutants,\n crossover_prob,\n seed=seed_stream())\n candidate_values = objective_function(*candidates)\n\n population_values = tf.where(\n tf.math.is_nan(population_values),\n dtype_util.as_numpy_dtype(population_values.dtype)(np.inf),\n population_values)\n\n to_replace = candidate_values < population_values\n next_population = [\n tf1.where(to_replace, candidates_part, population_part)\n for candidates_part, population_part in zip(candidates, population)\n ]\n next_values = tf.where(to_replace, candidate_values, population_values)\n\n return next_population, next_values\n\n\ndef minimize(objective_function,\n initial_population=None,\n initial_position=None,\n population_size=50,\n population_stddev=1.,\n max_iterations=100,\n func_tolerance=0,\n position_tolerance=1e-8,\n differential_weight=0.5,\n crossover_prob=0.9,\n seed=None,\n name=None):\n \"\"\"Applies the differential evolution algorithm to minimize a function.\n\n Differential Evolution is an evolutionary optimization algorithm which works\n on a set of candidate solutions called the population. It iteratively\n improves the population by applying genetic operators of mutation and\n recombination. The objective function `f` supplies the fitness of each\n candidate. A candidate `s_1` is considered better than `s_2` if\n `f(s_1) < f(s_2)`.\n\n This method allows the user to either specify an initial population or a\n single candidate solution. If a single solution is specified, a population\n of the specified size is initialized by adding independent normal noise\n to the candidate solution.\n\n The implementation also supports a multi-part specification of the state.
For\n example, consider the objective function:\n\n ```python\n # x is a tensor of shape [n, m] while y is of shape [n].\n def objective(x, y):\n return tf.math.reduce_sum(x ** 2, axis=-1) + y ** 2\n ```\n The state in this case is specified by two input tensors `x` and `y`. To\n apply the algorithm to this objective function, one would need to specify\n an initial population as a list of two tensors of shapes\n `[population_size, m]` and `[population_size]`. The following code shows the\n complete example:\n\n ```python\n population_size = 40\n # With an initial population and a multi-part state.\n initial_population = (tf.random.normal([population_size]),\n tf.random.normal([population_size]))\n def easom_fn(x, y):\n return -(tf.math.cos(x) * tf.math.cos(y) *\n tf.math.exp(-(x-np.pi)**2 - (y-np.pi)**2))\n\n optim_results = tfp.optimizer.differential_evolution_minimize(\n easom_fn,\n initial_population=initial_population,\n seed=43210)\n\n print(optim_results.converged)\n print(optim_results.position) # Should be (close to) [pi, pi].\n print(optim_results.objective_value) # Should be -1.\n\n\n # With a single starting point\n initial_position = (tf.constant(1.0), tf.constant(1.0))\n\n optim_results = tfp.optimizer.differential_evolution_minimize(\n easom_fn,\n initial_position=initial_position,\n population_size=40,\n population_stddev=2.0,\n seed=43210)\n ```\n\n Args:\n objective_function: A Python callable that accepts a batch of possible\n solutions and returns the values of the objective function at those\n arguments as a rank 1 real `Tensor`. This specifies the function to be\n minimized. The input to this callable may be either a single `Tensor`\n or a Python `list` of `Tensor`s. The signature must match the format of\n the argument `population`. (i.e., objective_function(*population) must\n return the value of the function to be minimized).\n initial_population: A real `Tensor` or Python list of `Tensor`s.\n If a list, each `Tensor` must be of rank at least 1 and with a common\n first dimension. The first dimension indexes into the candidate solutions\n while the rest of the dimensions (if any) index into an individual\n solution. The size of the population must be at least 4. This is a\n requirement of the DE algorithm.\n initial_position: A real `Tensor` of any shape. The seed solution used\n to initialize the population of solutions. If this parameter is specified\n then `initial_population` must not be specified.\n population_size: A positive scalar int32 `Tensor` greater than 4. The\n size of the population to evolve. This parameter is ignored if\n `initial_population` is specified.\n Default value: 50.\n population_stddev: A positive scalar real `Tensor` of the same dtype\n as `initial_position`. This parameter is ignored if `initial_population`\n is specified. Used to generate the population from the `initial_position`\n by adding random normal noise with zero mean and the specified standard\n deviation.\n Default value: 1.0\n max_iterations: Positive scalar int32 `Tensor`. The maximum number of\n generations to evolve the population for.\n Default value: 100\n func_tolerance: Scalar `Tensor` of the same dtype as the output of the\n `objective_function`. The algorithm stops if the absolute difference\n between the largest and the smallest objective function value in the\n population is below this number.\n Default value: 0\n position_tolerance: Scalar `Tensor` of the same real dtype as\n `initial_position` or `initial_population`.
The algorithm terminates if\n the largest absolute difference between the coordinates of the population\n members is below this threshold.\n Default value: 1e-8\n differential_weight: Real scalar `Tensor`. Must be positive and less than\n 2.0. The parameter controlling the strength of mutation in the algorithm.\n Default value: 0.5\n crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The\n probability of recombination per site.\n Default value: 0.9\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: None.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name\n 'differential_evolution_minimize' is used.\n Default value: None\n\n Returns:\n optimizer_results: An object containing the following attributes:\n converged: Scalar boolean `Tensor` indicating whether the minimum was\n found within the specified tolerances.\n num_objective_evaluations: The total number of objective\n evaluations performed.\n position: A `Tensor` containing the best point found during the search.\n If the search converged, then this value is the argmin of the\n objective function within the specified tolerances.\n objective_value: A `Tensor` containing the value of the objective\n function at the `position`. If the search\n converged, then this is the (local) minimum of\n the objective function.\n final_population: The final state of the population.\n final_objective_values: The objective function evaluated at the\n final population.\n initial_population: The starting population.\n initial_objective_values: The objective function evaluated at the\n initial population.\n num_iterations: The number of iterations of the main algorithm body.\n\n Raises:\n ValueError: If neither the initial population, nor the initial position\n are specified or if both are specified.\n \"\"\"\n\n if initial_population is None and initial_position is None:\n raise ValueError('Either the initial population or the initial position '\n 'must be specified.')\n if initial_population is not None and initial_position is not None:\n raise ValueError('Only one of initial population or initial position '\n 'should be specified')\n\n with tf.name_scope(name or 'minimize'):\n (\n was_iterable,\n population,\n population_values,\n max_iterations,\n func_tolerance,\n position_tolerance,\n differential_weight,\n crossover_prob\n ) = _get_initial_args(objective_function,\n initial_population,\n initial_position,\n population_size,\n population_stddev,\n max_iterations,\n func_tolerance,\n position_tolerance,\n differential_weight,\n crossover_prob,\n seed)\n\n def evolve_body(loop_vars):\n \"\"\"Performs one step of the evolution.\"\"\"\n next_population, next_population_values = one_step(\n objective_function,\n loop_vars.population,\n population_values=loop_vars.population_values,\n differential_weight=differential_weight,\n crossover_prob=crossover_prob,\n seed=seed)\n converged = _check_convergence(next_population,\n next_population_values,\n func_tolerance,\n position_tolerance)\n\n failed = _check_failure(next_population_values)\n\n return [_MinimizeLoopVars(\n converged=converged,\n failed=failed,\n num_iterations=loop_vars.num_iterations+1,\n population=next_population,\n population_values=next_population_values)]\n\n def evolve_cond(loop_vars):\n should_stop = (\n loop_vars.failed |\n loop_vars.converged |\n (max_iterations is not None and\n loop_vars.num_iterations >= max_iterations))\n return ~should_stop\n\n 
initial_vars = _MinimizeLoopVars(\n converged=tf.convert_to_tensor(False),\n failed=tf.convert_to_tensor(False),\n num_iterations=tf.convert_to_tensor(0),\n population=population,\n population_values=population_values)\n final_state = tf.while_loop(\n cond=evolve_cond, body=evolve_body, loop_vars=(initial_vars,))[0]\n best_position, best_values = _find_best_in_population(\n final_state.population,\n final_state.population_values)\n # Ensure we return a similar structure to what the user supplied.\n final_population = final_state.population\n if not was_iterable:\n final_population = final_population[0]\n best_position = best_position[0]\n return DifferentialEvolutionOptimizerResults(\n converged=final_state.converged,\n failed=final_state.failed,\n position=best_position,\n objective_value=best_values,\n final_population=final_population,\n final_objective_values=final_state.population_values,\n initial_population=population,\n initial_objective_values=population_values,\n num_iterations=final_state.num_iterations)\n\n\ndef _get_initial_args(objective_function,\n initial_population,\n initial_position,\n population_size,\n population_stddev,\n max_iterations,\n func_tolerance,\n position_tolerance,\n differential_weight,\n crossover_prob,\n seed):\n \"\"\"Processes initial args.\"\"\"\n was_iterable = False\n if initial_position is not None:\n initial_position, was_iterable = _ensure_list(initial_position)\n\n if initial_population is not None:\n initial_population, was_iterable = _ensure_list(initial_population)\n\n population = _get_starting_population(initial_population,\n initial_position,\n population_size,\n population_stddev,\n seed=seed)\n\n differential_weight = tf.convert_to_tensor(\n differential_weight, dtype=population[0].dtype.base_dtype)\n\n crossover_prob = tf.convert_to_tensor(crossover_prob)\n population_values = objective_function(*population)\n if max_iterations is not None:\n max_iterations = tf.convert_to_tensor(max_iterations)\n func_tolerance = tf.convert_to_tensor(\n func_tolerance, dtype=population_values.dtype.base_dtype)\n position_tolerance = tf.convert_to_tensor(\n position_tolerance, dtype=population[0].dtype.base_dtype)\n return (was_iterable,\n population,\n population_values,\n max_iterations,\n func_tolerance,\n position_tolerance,\n differential_weight,\n crossover_prob)\n\n\ndef _check_failure(population_values):\n \"\"\"Checks if all the population values are NaN/infinite.\"\"\"\n return tf.math.reduce_all(tf.math.is_inf(population_values))\n\n\ndef _find_best_in_population(population, values):\n \"\"\"Finds the population member with the lowest value.\"\"\"\n best_value = tf.math.reduce_min(values)\n best_index = tf.where(tf.math.equal(values, best_value))[0, 0]\n\n return ([population_part[best_index] for population_part in population],\n best_value)\n\n\ndef _check_convergence(population,\n population_values,\n func_tolerance,\n position_tolerance):\n \"\"\"Checks whether the convergence criteria have been met.\"\"\"\n # Check func tolerance\n value_range = tf.math.abs(\n tf.math.reduce_max(population_values) -\n tf.math.reduce_min(population_values))\n value_converged = value_range <= func_tolerance\n # Ideally, we would compute the position convergence by computing the\n # pairwise distance between every member of the population and checking if\n # the maximum of those is less than the supplied tolerance. However, this is\n # completely infeasible in terms of performance. 
We adopt a more conservative\n # approach which checks the distance between the first population member\n # with the rest of the population. If the largest such distance is less than\n # half the supplied tolerance, we stop. The reason why this is sufficient is\n # as follows. For any pair of distinct points (a, b) in the population, we\n # have the relation: |a - b| <= |x0 - a| + |x0 - b|, where x0 is any\n # other point. In particular, let x0 be the first element of the population\n # and suppose that the largest distance between this point and any other\n # member is epsilon. Then, for any pair of points (a, b),\n # |a - b| <= 2 * epsilon and hence, the maximum distance between any pair of\n # points in the population is bounded above by twice the distance between\n # the first point and other points.\n half_tol = position_tolerance / 2\n def part_converged(part):\n return tf.math.reduce_max(tf.math.abs(part - part[0])) <= half_tol\n\n x_converged = tf.math.reduce_all(\n [part_converged(part) for part in population])\n return value_converged | x_converged\n\n\ndef _get_starting_population(initial_population,\n initial_position,\n population_size,\n population_stddev,\n seed):\n \"\"\"Constructs the initial population.\n\n If an initial population is not already provided, this function constructs\n a population by adding random normal noise to the initial position.\n\n Args:\n initial_population: None or a list of `Tensor`s. The initial population.\n initial_position: None or a list of `Tensor`s. The initial position.\n If initial_population is None, this argument must not be None.\n population_size: Scalar integer `Tensor`. The number of members in the\n population. If the initial population is not None, this parameter is\n ignored.\n population_stddev: A positive scalar real `Tensor` of the same dtype\n as `initial_position` or `initial_population` (whichever is not None).\n This parameter is ignored if `initial_population`\n is specified. Used to generate the population from the\n `initial_position` by adding random normal noise with zero mean and\n the specified standard deviation.\n seed: Seed for random number generation.\n\n Returns:\n A list of `Tensor`s. The initial population.\n \"\"\"\n if initial_population is not None:\n return [tf.convert_to_tensor(part) for part in initial_population]\n # Constructs the population by adding normal noise to the initial position.\n seed_stream = tfp_util.SeedStream(seed, salt='get_starting_population')\n population = []\n for part in initial_position:\n part = tf.convert_to_tensor(part)\n part_event_shape = tf.shape(part)\n # We only draw population_size-1 random vectors because we want to ensure\n # that the supplied position is part of the population. The first member\n # is set to be the initial_position.\n population_part_shape = tf.concat([[population_size-1],\n part_event_shape], axis=0)\n population_part = tf.random.normal(population_part_shape,\n stddev=population_stddev,\n dtype=part.dtype.base_dtype,\n seed=seed_stream())\n population_part += part\n population_part = tf.concat([[part], population_part], axis=0)\n population.append(population_part)\n return population\n\n\ndef _binary_crossover(population,\n population_size,\n mutants,\n crossover_prob,\n seed):\n \"\"\"Performs recombination by binary crossover for the current population.\n\n Let v_i denote the i'th component of the member v and m_i the corresponding\n component of the mutant vector corresponding to v. 
Then the crossed over\n vector w is determined by setting w_i = m_i with probability\n crossover_prob, and w_i = v_i otherwise. In addition, DE requires that\n at least one of the components is crossed over (otherwise we end\n up with no change). This is done by choosing one index, say k, at random\n and forcing a crossover there (i.e. w_k = m_k). This is the scheme\n implemented in this function.\n\n Args:\n population: A Python list of `Tensor`s where each `Tensor` in the list\n must be of rank at least 1 and all the elements must have a common\n first dimension. The base population to cross over.\n population_size: A scalar integer `Tensor`. The number of elements in the\n population (i.e. size of the first dimension of any member of\n `population`).\n mutants: A Python list of `Tensor`s with the same structure as `population`.\n The mutated population.\n crossover_prob: A positive real scalar `Tensor` bounded above by 1.0. The\n probability of a crossover being performed for each axis.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n\n Returns:\n A list of `Tensor`s of the same structure, dtype and shape as `population`.\n The recombined population.\n \"\"\"\n sizes = [tf.cast(tf.size(x), dtype=tf.float64) for x in population]\n seed_stream = tfp_util.SeedStream(seed, salt='binary_crossover')\n force_crossover_group = distributions.Categorical(sizes).sample(\n [population_size, 1], seed=seed_stream())\n recombinants = []\n for i, population_part in enumerate(population):\n pop_part_flat = tf.reshape(population_part, [population_size, -1])\n mutant_part_flat = tf.reshape(mutants[i], [population_size, -1])\n part_size = tf.size(population_part) // population_size\n force_crossovers = tf.one_hot(\n tf.random.uniform([population_size],\n minval=0,\n maxval=part_size,\n dtype=tf.int32,\n seed=seed_stream()),\n part_size,\n on_value=True,\n off_value=False,\n dtype=tf.bool) # Tensor of shape [population_size, part_size]\n group_mask = tf.math.equal(force_crossover_group, i)\n force_crossovers &= group_mask\n do_binary_crossover = tf.random.uniform(\n [population_size, part_size],\n dtype=crossover_prob.dtype.base_dtype,\n seed=seed_stream()) < crossover_prob\n do_binary_crossover |= force_crossovers\n recombinant_flat = tf1.where(\n do_binary_crossover, mutant_part_flat, pop_part_flat)\n recombinant = tf.reshape(recombinant_flat, tf.shape(population_part))\n recombinants.append(recombinant)\n return recombinants\n\n\ndef _get_mutants(population,\n population_size,\n mixing_indices,\n differential_weight):\n \"\"\"Computes the mutated vectors for each population member.\n\n Args:\n population: Python `list` of `Tensor`s representing the\n current population vectors. Each `Tensor` must be of the same real dtype.\n The first dimension of each `Tensor` indexes individual\n population members. For example, if the population is a list with a\n single `Tensor` of shape [n, m1, m2], then `n` is the population size and\n the shape of an individual solution is [m1, m2].\n If there is more than one element in the population, then each `Tensor`\n in the list should have the first axis of the same size.\n population_size: Scalar integer `Tensor`. The size of the population.\n mixing_indices: `Tensor` of integral dtype and shape [n, 3] where `n` is the\n number of members in the population. Each element of the `Tensor` must be\n a valid index into the first dimension of the population (i.e. range\n between `0` and `n-1` inclusive).\n differential_weight: Real scalar `Tensor`.
Must be positive and less than\n 2.0. The parameter controlling the strength of mutation.\n\n Returns:\n mutants: `Tensor` or Python `list` of `Tensor`s of the same shape and dtype\n as the input population. The mutated vectors.\n \"\"\"\n mixing_indices = tf.reshape(mixing_indices, [-1])\n weights = tf.stack([1.0, differential_weight, -differential_weight])\n def _mutant_part(population_part):\n donors = tf.gather(population_part, mixing_indices)\n donors = tf.transpose(\n a=tf.reshape(donors, [population_size, 3, -1]), perm=[0, 2, 1])\n return tf.math.reduce_sum(donors * weights, axis=-1)\n\n return [_mutant_part(population_part) for population_part in population]\n\n\ndef _get_mixing_indices(size, seed=None, name=None):\n \"\"\"Generates an array of indices suitable for the mutation operation.\n\n The mutation operation in differential evolution requires that for every\n element of the population, three distinct other elements be chosen to produce\n a trial candidate. This function generates an array of shape [size, 3]\n satisfying the properties that:\n (a). array[i, :] does not contain the index 'i'.\n (b). array[i, :] does not contain any overlapping indices.\n (c). All elements in the array are between 0 and size - 1 inclusive.\n\n Args:\n size: Scalar integer `Tensor`. The number of samples as well as the range\n of the indices to sample from.\n seed: `int` or None. The random seed for this `Op`. If `None`, no seed is\n applied.\n Default value: `None`.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: 'get_mixing_indices'.\n\n Returns:\n sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing\n samples without replacement between 0 and size - 1 (inclusive) with the\n `i`th row not including the number `i`.\n \"\"\"\n with tf.name_scope(name or 'get_mixing_indices'):\n size = tf.convert_to_tensor(size)\n dtype = size.dtype\n seed_stream = tfp_util.SeedStream(seed, salt='get_mixing_indices')\n first = tf.random.uniform([size],\n maxval=size-1,\n dtype=dtype,\n seed=seed_stream())\n second = tf.random.uniform([size],\n maxval=size-2,\n dtype=dtype,\n seed=seed_stream())\n third = tf.random.uniform([size],\n maxval=size-3,\n dtype=dtype,\n seed=seed_stream())\n\n # Shift second if it is on top of or to the right of first\n second = tf.where(second < first, second, second + 1)\n smaller = tf.math.minimum(first, second)\n larger = tf.math.maximum(first, second)\n # Shift the third one so it does not coincide with either the first or the\n # second number. Assuming first < second, shift by 1 if the number is in\n # [first, second) and by 2 if the number is greater than or equal to the\n # second.\n third = tf.where(third < smaller, third, third + 1)\n third = tf.where(third < larger, third, third + 1)\n sample = tf.stack([first, second, third], axis=1)\n to_avoid = tf.expand_dims(tf.range(size), axis=-1)\n sample = tf.where(sample < to_avoid, sample, sample + 1)\n return sample\n\n\ndef _ensure_list(tensor_or_list):\n \"\"\"Converts the input arg to a list if it is not a list already.\n\n Args:\n tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to\n convert to a list of `Tensor`s.\n\n Returns:\n A tuple of two elements. The first is a Python list of `Tensor`s containing\n the original arguments. The second is a boolean indicating whether\n the original argument was a list or tuple already.\n \"\"\"\n if isinstance(tensor_or_list, (list, tuple)):\n return list(tensor_or_list), True\n return [tensor_or_list], False\n",
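The file above contains the internals of TFP's differential evolution optimizer. A minimal end-to-end sketch through the public wrapper `tfp.optimizer.differential_evolution_minimize` follows; the result field names (`converged`, `position`) are assumptions based on the conventions in the module above, not verified against every TFP release:

```python
# Sketch: minimize a 2-D quadratic with differential evolution.
# Assumes the public wrapper `tfp.optimizer.differential_evolution_minimize`;
# the result field names (`converged`, `position`) are assumptions.
import tensorflow as tf
import tensorflow_probability as tfp


def objective(x, y):
  # Evaluated on the whole population at once: `x` and `y` each have
  # shape [population_size], matching the batching convention above.
  return (x - 1.0) ** 2 + (y + 0.5) ** 2


results = tfp.optimizer.differential_evolution_minimize(
    objective,
    initial_position=[tf.constant(0.0), tf.constant(0.0)],
    population_size=40,
    population_stddev=1.5,
    seed=42)
print(results.converged.numpy())
print([p.numpy() for p in results.position])  # expected near [1.0, -0.5]
```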
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Positive-Semidefinite Kernel library utilities.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'mask_matrix',\n 'maybe_get_common_dtype',\n 'pad_shape_with_ones',\n 'pairwise_square_distance_matrix',\n 'pairwise_square_distance_tensor',\n 'sum_rightmost_ndims_preserving_shape',\n]\n\n\ndef pad_shape_with_ones(x, ndims, start=-1):\n \"\"\"Maybe add `ndims` ones to `x.shape` starting at `start`.\n\n If `ndims` is zero, this is a no-op; otherwise, we will create and return a\n new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the\n right side. If the shape of `x` is known statically, the shape of the return\n value will be as well.\n\n Args:\n x: The `Tensor` we'll return a reshaping of.\n ndims: Python `integer` number of ones to pad onto `x.shape`.\n start: Python `integer` specifying where to start padding with ones. Must\n be a negative integer. For instance, a value of `-1` means to pad at the\n end of the shape. Default value: `-1`.\n Returns:\n If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`\n with `ndims` ones concatenated on the right side. If possible, returns a\n `Tensor` whose shape is known statically.\n Raises:\n ValueError: if `ndims` is not a Python `integer` greater than or equal to\n zero.\n \"\"\"\n if not (isinstance(ndims, int) and ndims >= 0):\n raise ValueError(\n '`ndims` must be a Python `integer` greater than zero. Got: {}'\n .format(ndims))\n if not (isinstance(start, int) and start <= -1):\n raise ValueError(\n '`start` must be a Python `integer` less than zero. 
Got: {}'\n .format(start))\n if ndims == 0:\n return x\n x = tf.convert_to_tensor(value=x)\n original_shape = x.shape\n rank = ps.rank(x)\n first_shape = ps.shape(x)[:rank + start + 1]\n second_shape = ps.shape(x)[rank + start + 1:]\n new_shape = ps.pad(first_shape, paddings=[[0, ndims]], constant_values=1)\n new_shape = ps.concat([new_shape, second_shape], axis=0)\n x = tf.reshape(x, new_shape)\n if start == -1:\n tensorshape_util.set_shape(\n x, tensorshape_util.concatenate(original_shape, [1] * ndims))\n elif tensorshape_util.rank(original_shape) is not None:\n original_ndims = tensorshape_util.rank(original_shape)\n new_shape = tensorshape_util.concatenate(\n original_shape[:original_ndims + start + 1],\n tensorshape_util.concatenate(\n [1] * ndims,\n original_shape[original_ndims + start + 1:]))\n tensorshape_util.set_shape(x, new_shape)\n return x\n\n\ndef sum_rightmost_ndims_preserving_shape(x, ndims):\n \"\"\"Return `Tensor` with right-most ndims summed.\n\n Args:\n x: the `Tensor` whose right-most `ndims` dimensions to sum\n ndims: number of right-most dimensions to sum.\n\n Returns:\n A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most\n dimensions. If the shape of `x` is statically known, the result will also\n have statically known shape. Otherwise, the resulting shape will only be\n known at runtime.\n \"\"\"\n x = tf.convert_to_tensor(x)\n x_ndims = ps.rank(x)\n return tf.reduce_sum(x, axis=ps.range(x_ndims - ndims, x_ndims))\n\n\[email protected]_gradient\ndef sqrt_with_finite_grads(x, name=None):\n \"\"\"A sqrt function whose gradient at zero is very large but finite.\n\n Args:\n x: a `Tensor` whose sqrt is to be computed.\n name: a Python `str` prefixed to all ops created by this function.\n Default `None` (i.e., \"sqrt_with_finite_grads\").\n\n Returns:\n sqrt: the square root of `x`, with an overridden gradient at zero\n grad: a gradient function, which is the same as sqrt's gradient everywhere\n except at zero, where it is given a large finite value, instead of `inf`.\n\n Raises:\n TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.\n\n Often in kernel functions, we need to compute the L2 norm of the difference\n between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the\n case where `x` and `y` are identical, e.g., on the diagonal of a kernel\n matrix, we get `NaN`s when we take gradients with respect to the inputs. To\n see this, consider the forward pass:\n\n ```\n [x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] -->\n (x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2))\n ```\n\n When we backprop through this forward pass, the `sqrt` yields an `inf` because\n `grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at\n the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get\n `0 * inf`, which is `NaN`.\n\n We'd like to avoid these `NaN`s, since they infect the rest of the connected\n computation graph. Practically, when two inputs to a kernel function are\n equal, we are in one of two scenarios:\n 1. We are actually computing k(x, x), in which case norm(x - x) is\n identically zero, independent of x. In this case, we'd like the\n gradient to reflect this independence: it should be zero.\n 2. We are computing k(x, y), and x just *happens* to have the same value\n as y. The gradient at such inputs is in fact ill-defined (there is a\n cusp in the sqrt((x - y) ** 2) surface along the line x = y). 
There are,\n however, an infinite number of sub-gradients, all of which are valid at\n all such inputs. By symmetry, there is exactly one which is \"special\":\n zero, and we elect to use that value here. In practice, having two\n identical inputs to a kernel matrix is probably a pathological\n situation to be avoided, but that is better resolved at a higher level\n than this.\n\n To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine\n the gradient at zero. We assign it to be a very large value, specifically\n the sqrt of the max value of the floating point dtype of the input. We use\n the sqrt (as opposed to just using the max floating point value) to avoid\n potential overflow when combining this value with others downstream.\n \"\"\"\n with tf.name_scope(name or 'sqrt_with_finite_grads'):\n x = tf.convert_to_tensor(value=x, name='x')\n if not dtype_util.is_floating(x.dtype):\n raise TypeError('Input `x` must be floating type.')\n def grad(grad_ys):\n large_float_like_x = np.sqrt(\n np.finfo(dtype_util.as_numpy_dtype(x.dtype)).max)\n safe_grads = tf.where(\n tf.equal(x, 0), large_float_like_x, 0.5 * tf.math.rsqrt(x))\n return grad_ys * safe_grads\n return tf.sqrt(x), grad\n\n\ndef maybe_get_common_dtype(arg_list):\n \"\"\"Return common dtype of arg_list, or None.\n\n Args:\n arg_list: an iterable of items which are either `None` or have a `dtype`\n property.\n\n Returns:\n dtype: The common dtype of items in `arg_list`, or `None` if the list is\n empty or all items are `None`.\n \"\"\"\n # Note that `all` defaults to `True` if `arg_list` is empty.\n if all(a is None for a in arg_list):\n return None\n return dtype_util.common_dtype(arg_list, tf.float32)\n\n\ndef pairwise_square_distance_matrix(x1, x2, feature_ndims):\n \"\"\"Returns pairwise square distance between x1 and x2.\n\n Given `x1` and `x2`, Tensors with shape `[..., N, D1, ... Dk]` and\n `[..., M, D1, ... Dk]`, compute the pairwise distance matrix `a_ij` of shape\n `[..., N, M]`, where each entry `a_ij` is the square of the euclidean norm of\n `x1[..., i, ...] - x2[..., j, ...]`.\n\n The approach uses the following identity (shown here for k = 1):\n ```none\n a_ij = sum_d (x1[i, d] - x2[j, d]) ** 2 =\n sum_d x1[i, d] ** 2 + x2[j, d] ** 2 - 2 * x1[i, d] * x2[j, d]\n ```\n\n The latter term can be written as a matmul between `x1` and `x2`.\n This reduces the memory from the naive approach of computing the\n squared difference of `x1` and `x2` by a factor of `prod_k D_k`.\n This is at the cost of the computation being more numerically unstable.\n\n Args:\n x1: Floating point `Tensor` with shape `B1 + [N] + [D1, ..., Dk]`,\n where `B1` is a (possibly empty) batch shape.\n x2: Floating point `Tensor` with shape `B2 + [M] + [D1, ..., Dk]`,\n where `B2` is a (possibly empty) batch shape that broadcasts\n with `B1`.\n feature_ndims: The number of dimensions to consider for the euclidean\n norm. 
This is `k` from above.\n Returns:\n `Tensor` of shape `[..., N, M]` representing the pairwise square\n distance matrix.\n \"\"\"\n row_norm_x1 = sum_rightmost_ndims_preserving_shape(\n tf.square(x1), feature_ndims)[..., tf.newaxis]\n row_norm_x2 = sum_rightmost_ndims_preserving_shape(\n tf.square(x2), feature_ndims)[..., tf.newaxis, :]\n\n x1 = tf.reshape(x1, ps.concat(\n [ps.shape(x1)[:-feature_ndims], [\n ps.reduce_prod(ps.shape(x1)[-feature_ndims:])]], axis=0))\n x2 = tf.reshape(x2, ps.concat(\n [ps.shape(x2)[:-feature_ndims], [\n ps.reduce_prod(ps.shape(x2)[-feature_ndims:])]], axis=0))\n pairwise_sq = row_norm_x1 + row_norm_x2 - 2 * tf.linalg.matmul(\n x1, x2, transpose_b=True)\n pairwise_sq = tf.clip_by_value(pairwise_sq, 0., np.inf)\n return pairwise_sq\n\n\ndef pairwise_square_distance_tensor(\n x1, x2, feature_ndims, x1_example_ndims=1, x2_example_ndims=1):\n \"\"\"Returns pairwise distance between x1 and x2.\n\n This method is a generalization of `pairwise_square_distance_matrix`.\n Given `x1` and `x2`, Tensors with shape `[..., N1, ... Nm, D1, ... Dk]` and\n `[..., M1, ... Ml, D1, ... Dk]`, compute the pairwise distance tensor `A` of\n shape `[..., N1, ... Nm, M1, ... Ml]`, where `m` is `x1_example_ndims` and\n `l` is `x2_example_ndims`.\n\n Args:\n x1: Floating point `Tensor` with shape `B1 + E1 + [D1, ..., Dk]`,\n where `B1` is a (possibly empty) batch shape, and `E1` is a list\n of `x1_example_ndims` values.\n x2: Floating point `Tensor` with shape `B2 + E2 + [D1, ..., Dk]`,\n where `B2` is a (possibly empty) batch shape that broadcasts\n with `B1`, and `E2` is a list of `x2_example_ndims` values.\n feature_ndims: The number of dimensions to consider for the euclidean\n norm. This is `k` from above.\n x1_example_ndims: Integer for number of example dimensions in `x1`. This is\n `len(E1)`.\n x2_example_ndims: Integer for number of example dimensions in `x2`. This is\n `len(E2)`.\n Returns:\n `Tensor` of shape `bc(B1, B2) + E1 + E2` representing the pairwise square\n distance tensor.\n \"\"\"\n # Collapse all the example dimensions and then expand after.\n x1_shape = tf.shape(x1)\n x1_example_shape = x1_shape[\n -(feature_ndims + x1_example_ndims):-feature_ndims]\n\n x2_shape = tf.shape(x2)\n x2_example_shape = x2_shape[\n -(feature_ndims + x2_example_ndims):-feature_ndims]\n\n x1 = tf.reshape(x1, tf.concat(\n [x1_shape[:-(feature_ndims + x1_example_ndims)],\n [-1],\n x1_shape[-feature_ndims:]], axis=0))\n x2 = tf.reshape(x2, tf.concat(\n [x2_shape[:-(feature_ndims + x2_example_ndims)],\n [-1],\n x2_shape[-feature_ndims:]], axis=0))\n pairwise = pairwise_square_distance_matrix(\n x1, x2, feature_ndims=feature_ndims)\n # Now we need to undo the transformation.\n return tf.reshape(pairwise, tf.concat([\n tf.shape(pairwise)[:-2], x1_example_shape, x2_example_shape], axis=0))\n\n\ndef mask_matrix(x, is_missing=None):\n \"\"\"Copies a matrix, replacing masked-out rows/cols from the identity matrix.\n\n Args:\n x: A Tensor of shape `[..., n, n]`, representing a batch of n-by-n matrices.\n is_missing: A boolean Tensor of shape `[..., n]`, representing a batch of\n masks. If `is_missing` is None, `x` is returned.\n Returns:\n A Tensor of shape `[..., n, n]`, representing a batch of n-by-n matrices.\n For each batch member `r`, element `r[i, j]` equals `eye(n)[i, j]` if\n dimension `i` or `j` is True in the corresponding input mask. 
Otherwise,\n `r[i, j]` equals the corresponding element from `x`.\n \"\"\"\n if is_missing is None:\n return x\n\n x = tf.convert_to_tensor(x)\n is_missing = tf.convert_to_tensor(is_missing, dtype=tf.bool)\n\n n = ps.dimension_size(x, -1)\n\n return tf.where(is_missing[..., tf.newaxis] | is_missing[..., tf.newaxis, :],\n tf.eye(n, dtype=x.dtype),\n x)\n",
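As a quick self-contained check (plain TF only, no TFP internals), the norm-expansion trick that `pairwise_square_distance_matrix` relies on can be verified against the naive broadcasted computation for `feature_ndims=1`:

```python
# Sketch: verify ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, vectorized,
# against the naive broadcasted difference (which materializes [N, M, D]).
import numpy as np
import tensorflow.compat.v2 as tf

x1 = tf.random.normal([5, 3], dtype=tf.float64)   # [N, D]
x2 = tf.random.normal([4, 3], dtype=tf.float64)   # [M, D]

sq_norm1 = tf.reduce_sum(tf.square(x1), axis=-1)[:, tf.newaxis]   # [N, 1]
sq_norm2 = tf.reduce_sum(tf.square(x2), axis=-1)[tf.newaxis, :]   # [1, M]
expanded = sq_norm1 + sq_norm2 - 2. * tf.linalg.matmul(
    x1, x2, transpose_b=True)                                     # [N, M]
expanded = tf.clip_by_value(expanded, 0., np.inf)  # guard tiny negatives

naive = tf.reduce_sum(
    tf.square(x1[:, tf.newaxis, :] - x2[tf.newaxis, :, :]), axis=-1)

np.testing.assert_allclose(expanded.numpy(), naive.numpy(), atol=1e-12)
```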
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Independent distribution class.\"\"\"\n\nimport collections\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import math as tfp_math\nfrom tensorflow_probability.python.distributions import distribution as distribution_lib\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.distributions import log_prob_ratio\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n\nclass _Independent(distribution_lib.Distribution):\n \"\"\"Independent distribution from batch of distributions.\n\n This distribution is useful for regarding a collection of independent,\n non-identical distributions as a single random variable. For example, the\n `Independent` distribution composed of a collection of `Bernoulli`\n distributions might define a distribution over an image (where each\n `Bernoulli` is a distribution over each pixel).\n\n More precisely, a collection of `B` (independent) `E`-variate random variables\n (rv) `{X_1, ..., X_B}`, can be regarded as a `[B, E]`-variate random variable\n `(X_1, ..., X_B)` with probability\n `p(x_1, ..., x_B) = p_1(x_1) * ... * p_B(x_B)` where `p_b(X_b)` is the\n probability of the `b`-th rv. More generally `B, E` can be arbitrary shapes.\n\n Similarly, the `Independent` distribution specifies a distribution over `[B,\n E]`-shaped events. It operates by reinterpreting the rightmost batch dims as\n part of the event dimensions. The `reinterpreted_batch_ndims` parameter\n controls the number of batch dims which are absorbed as event dims;\n `reinterpreted_batch_ndims <= len(batch_shape)`. For example, the `log_prob`\n function entails a `reduce_sum` over the rightmost `reinterpreted_batch_ndims`\n after calling the base distribution's `log_prob`. 
In other words, since the\n batch dimension(s) index independent distributions, the resultant multivariate\n will have independent components.\n\n #### Mathematical Details\n\n The probability function is,\n\n ```none\n prob(x; reinterpreted_batch_ndims) = tf.reduce_prod(\n dist.prob(x),\n axis=-1-range(reinterpreted_batch_ndims))\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Make independent distribution from a 2-batch Normal.\n ind = tfd.Independent(\n distribution=tfd.Normal(loc=[-1., 1], scale=[0.1, 0.5]),\n reinterpreted_batch_ndims=1)\n\n # All batch dims have been 'absorbed' into event dims.\n ind.batch_shape # ==> []\n ind.event_shape # ==> [2]\n\n # Make independent distribution from a 2-batch bivariate Normal.\n ind = tfd.Independent(\n distribution=tfd.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]],\n scale_identity_multiplier=[1., 0.5]),\n reinterpreted_batch_ndims=1)\n\n # All batch dims have been 'absorbed' into event dims.\n ind.batch_shape # ==> []\n ind.event_shape # ==> [2, 2]\n ```\n\n \"\"\"\n\n @deprecation.deprecated_arg_values(\n '2022-03-01',\n 'Please pass an integer value for `reinterpreted_batch_ndims`. The '\n 'current behavior corresponds to `reinterpreted_batch_ndims=tf.size('\n 'distribution.batch_shape_tensor()) - 1`.',\n reinterpreted_batch_ndims=None)\n def __init__(self,\n distribution,\n reinterpreted_batch_ndims=None,\n validate_args=False,\n experimental_use_kahan_sum=False,\n name=None):\n \"\"\"Construct an `Independent` distribution.\n\n Args:\n distribution: The base distribution instance to transform. Typically an\n instance of `Distribution`.\n reinterpreted_batch_ndims: Scalar, integer number of rightmost batch dims\n which will be regarded as event dims. When `None` all but the first\n batch axis (batch axis 0) will be transferred to event dimensions\n (analogous to `tf.layers.flatten`).\n validate_args: Python `bool`. Whether to validate input with asserts.\n If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n experimental_use_kahan_sum: Python `bool`. When `True`, we use Kahan\n summation to aggregate independent underlying log_prob values, which\n improves on the precision of a naive float32 sum. This can be\n noticeable in particular for large dimensions in float32. 
See CPU caveat\n on `tfp.math.reduce_kahan_sum`.\n name: The name for ops managed by the distribution.\n Default value: `Independent + distribution.name`.\n\n Raises:\n ValueError: if `reinterpreted_batch_ndims` exceeds\n `distribution.batch_ndims`\n \"\"\"\n parameters = dict(locals())\n self._experimental_use_kahan_sum = experimental_use_kahan_sum\n with tf.name_scope(name or ('Independent' + distribution.name)) as name:\n self._distribution = distribution\n\n if reinterpreted_batch_ndims is None:\n # If possible, statically infer reinterpreted_batch_ndims.\n batch_ndims = tensorshape_util.rank(distribution.batch_shape)\n if batch_ndims is not None:\n self._static_reinterpreted_batch_ndims = max(0, batch_ndims - 1)\n self._reinterpreted_batch_ndims = ps.convert_to_shape_tensor(\n self._static_reinterpreted_batch_ndims,\n dtype_hint=tf.int32,\n name='reinterpreted_batch_ndims')\n else:\n self._reinterpreted_batch_ndims = None\n self._static_reinterpreted_batch_ndims = None\n\n else:\n self._reinterpreted_batch_ndims = tensor_util.convert_nonref_to_tensor(\n reinterpreted_batch_ndims,\n dtype_hint=tf.int32,\n as_shape_tensor=True,\n name='reinterpreted_batch_ndims')\n static_val = tf.get_static_value(self._reinterpreted_batch_ndims)\n self._static_reinterpreted_batch_ndims = (\n None if static_val is None else int(static_val))\n\n super(_Independent, self).__init__(\n dtype=self._distribution.dtype,\n reparameterization_type=self._distribution.reparameterization_type,\n validate_args=validate_args,\n allow_nan_stats=self._distribution.allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @property\n def distribution(self):\n return self._distribution\n\n @property\n def reinterpreted_batch_ndims(self):\n return self._reinterpreted_batch_ndims\n\n @property\n def experimental_is_sharded(self):\n return self.distribution.experimental_is_sharded\n\n def _get_reinterpreted_batch_ndims(self,\n distribution_batch_shape_tensor=None):\n if self._static_reinterpreted_batch_ndims is not None:\n return self._static_reinterpreted_batch_ndims\n if self._reinterpreted_batch_ndims is not None:\n return tf.convert_to_tensor(self._reinterpreted_batch_ndims)\n\n if distribution_batch_shape_tensor is None:\n distribution_batch_shape_tensor = self.distribution.batch_shape_tensor()\n return ps.cast(\n ps.maximum(0, ps.size(distribution_batch_shape_tensor) - 1),\n np.int32)\n\n # TODO(davmre): Delete this override.\n # The default slicing machinery should work here after we remove support for\n # the deprecated init arg `reinterpreted_batch_ndims=None`.\n def __getitem__(self, slices):\n # Because slicing is parameterization-dependent, we only implement slicing\n # for instances of Independent, not subclasses thereof.\n if type(self) not in (_Independent, Independent): # pylint: disable=unidiomatic-typecheck\n return super(_Independent, self).__getitem__(slices)\n\n if self._static_reinterpreted_batch_ndims is None:\n raise NotImplementedError(\n 'Cannot slice Independent with non-static reinterpreted_batch_ndims')\n slices = (tuple(slices) if isinstance(slices, collections.abc.Sequence)\n else (slices,))\n if Ellipsis not in slices:\n slices = slices + (Ellipsis,)\n slices = slices + (slice(None),) * int(\n self._static_reinterpreted_batch_ndims)\n return self.copy(\n distribution=self.distribution[slices],\n reinterpreted_batch_ndims=self._static_reinterpreted_batch_ndims)\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n return dict(\n 
distribution=parameter_properties.BatchedComponentProperties(\n # TODO(davmre): replace with `self.reinterpreted_batch_ndims` once\n # support for `reinterpreted_batch_ndims=None` has been removed.\n event_ndims=lambda self: self._get_reinterpreted_batch_ndims()), # pylint: disable=protected-access\n reinterpreted_batch_ndims=(\n parameter_properties.ShapeParameterProperties()))\n\n def _batch_shape_tensor(self):\n batch_shape = self.distribution.batch_shape_tensor()\n batch_ndims = ps.rank_from_shape(\n batch_shape, self.distribution.batch_shape)\n return batch_shape[\n :batch_ndims - self._get_reinterpreted_batch_ndims(batch_shape)]\n\n def _batch_shape(self):\n batch_shape = self.distribution.batch_shape\n if (self._static_reinterpreted_batch_ndims is None or\n tensorshape_util.rank(batch_shape) is None):\n return tf.TensorShape(None)\n d = (tensorshape_util.rank(batch_shape) -\n self._static_reinterpreted_batch_ndims)\n return batch_shape[:d]\n\n def _event_shape_tensor(self):\n # If both `distribution.batch_shape` and `distribution.event_shape` are\n # known statically, then Distribution won't call this method. But this\n # method may be called when only one of them is statically known.\n batch_shape = self.distribution.batch_shape\n if not tensorshape_util.is_fully_defined(batch_shape):\n batch_shape = self.distribution.batch_shape_tensor()\n batch_ndims = ps.rank_from_shape(batch_shape)\n event_shape = self.distribution.event_shape\n if not tensorshape_util.is_fully_defined(event_shape):\n event_shape = self.distribution.event_shape_tensor()\n return ps.concat([\n ps.convert_to_shape_tensor(batch_shape)[\n batch_ndims - self._get_reinterpreted_batch_ndims(batch_shape):],\n event_shape\n ], axis=0)\n\n def _event_shape(self):\n batch_shape = self.distribution.batch_shape\n if self._static_reinterpreted_batch_ndims is None:\n return tf.TensorShape(None)\n if tensorshape_util.rank(batch_shape) is not None:\n reinterpreted_batch_shape = batch_shape[\n tensorshape_util.rank(batch_shape) -\n self._static_reinterpreted_batch_ndims:]\n else:\n reinterpreted_batch_shape = tf.TensorShape(\n [None] * int(self._static_reinterpreted_batch_ndims))\n return tensorshape_util.concatenate(reinterpreted_batch_shape,\n self.distribution.event_shape)\n\n def _sample_n(self, n, seed, **kwargs):\n return self.distribution.sample(sample_shape=n, seed=seed, **kwargs)\n\n def _sum_fn(self):\n if self._experimental_use_kahan_sum:\n return lambda x, axis: tfp_math.reduce_kahan_sum(x, axis).total\n return tf.math.reduce_sum\n\n def _sample_and_log_prob(self, sample_shape, seed, **kwargs):\n x, lp = self.distribution.experimental_sample_and_log_prob(\n sample_shape, seed=seed, **kwargs)\n return x, self._reduce(self._sum_fn(), lp)\n\n def _log_prob(self, x, **kwargs):\n return self._reduce(\n self._sum_fn(), self.distribution.log_prob(x, **kwargs))\n\n def _unnormalized_log_prob(self, x, **kwargs):\n return self._reduce(\n self._sum_fn(), self.distribution.unnormalized_log_prob(x, **kwargs))\n\n def _log_cdf(self, x, **kwargs):\n return self._reduce(self._sum_fn(), self.distribution.log_cdf(x, **kwargs))\n\n def _entropy(self, **kwargs):\n # NOTE: If self._reinterpreted_batch_ndims is None, we could avoid a read\n # of self.distribution.batch_shape_tensor() in `self._reduce` here by\n # passing in `tf.shape(self.distribution.entropy())` to use instead.\n return self._reduce(self._sum_fn(), self.distribution.entropy(**kwargs))\n\n def _mean(self, **kwargs):\n return self.distribution.mean(**kwargs)\n\n def 
_variance(self, **kwargs):\n return self.distribution.variance(**kwargs)\n\n def _stddev(self, **kwargs):\n return self.distribution.stddev(**kwargs)\n\n def _mode(self, **kwargs):\n return self.distribution.mode(**kwargs)\n\n def _default_event_space_bijector(self):\n bijector = self.distribution.experimental_default_event_space_bijector()\n if (bijector is not None and\n getattr(bijector,\n '_use_kahan_sum',\n False) != self._experimental_use_kahan_sum):\n # Copy in case the wrapped distribution doesn't construct a brand-new\n # bijector each time.\n bijector = bijector.copy()\n # TODO(b/191803645): Come up with an API to set this.\n bijector._use_kahan_sum = self._experimental_use_kahan_sum # pylint: disable=protected-access\n return bijector\n\n def _parameter_control_dependencies(self, is_init):\n # self, distribution, reinterpreted_batch_ndims, validate_args):\n assertions = []\n\n batch_ndims = tensorshape_util.rank(self.distribution.batch_shape)\n if (batch_ndims is not None\n and self._static_reinterpreted_batch_ndims is not None):\n if is_init and self._static_reinterpreted_batch_ndims > batch_ndims:\n raise ValueError('reinterpreted_batch_ndims({}) cannot exceed '\n 'distribution.batch_ndims({})'.format(\n self._static_reinterpreted_batch_ndims,\n batch_ndims))\n elif self.validate_args:\n batch_shape_tensor = self.distribution.batch_shape_tensor()\n assertions.append(\n assert_util.assert_less_equal(\n self._get_reinterpreted_batch_ndims(batch_shape_tensor),\n ps.rank_from_shape(batch_shape_tensor),\n message=('reinterpreted_batch_ndims cannot exceed '\n 'distribution.batch_ndims')))\n return assertions\n\n def _reduce(self, op, stat):\n axis = 1 + ps.range(self._get_reinterpreted_batch_ndims())\n return op(stat, axis=-axis)\n\n\nclass Independent(\n _Independent, distribution_lib.AutoCompositeTensorDistribution):\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Maybe return a non-`CompositeTensor` `_Independent`.\"\"\"\n\n if cls is Independent:\n if args:\n distribution = args[0]\n else:\n distribution = kwargs.get('distribution')\n\n if not isinstance(distribution, tf.__internal__.CompositeTensor):\n return _Independent(*args, **kwargs)\n return super(Independent, cls).__new__(cls)\n\n\nIndependent.__doc__ = _Independent.__doc__ + '\\n' + (\n 'If `distribution` is a `CompositeTensor`, then the resulting '\n '`Independent` instance is a `CompositeTensor` as well. Otherwise, a '\n 'non-`CompositeTensor` `_Independent` instance is created instead. '\n 'Distribution subclasses that inherit from `Independent` will also inherit '\n 'from `CompositeTensor`.')\n\n\n@kullback_leibler.RegisterKL(_Independent, _Independent)\ndef _kl_independent(a, b, name='kl_independent'):\n \"\"\"Batched KL divergence `KL(a || b)` for Independent distributions.\n\n We can leverage the fact that\n ```\n KL(Independent(a) || Independent(b)) = sum(KL(a || b))\n ```\n where the sum is over the `reinterpreted_batch_ndims`.\n\n Args:\n a: Instance of `Independent`.\n b: Instance of `Independent`.\n name: (optional) name to use for created ops. 
Default 'kl_independent'.\n\n Returns:\n Batchwise `KL(a || b)`.\n\n Raises:\n ValueError: If the event shapes of `a` and `b`, or those of their underlying\n distributions, don't match.\n \"\"\"\n p = a.distribution\n q = b.distribution\n\n # The KL between any two (non)-batched distributions is a scalar.\n # Given that the KL between two factored distributions is the sum, i.e.\n # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute\n # KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.\n if (tensorshape_util.is_fully_defined(a.event_shape) and\n tensorshape_util.is_fully_defined(b.event_shape)):\n if a.event_shape == b.event_shape:\n if p.event_shape == q.event_shape:\n num_reduce_dims = (tensorshape_util.rank(a.event_shape) -\n tensorshape_util.rank(p.event_shape))\n reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]\n\n return tf.reduce_sum(\n kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)\n else:\n raise NotImplementedError('KL between Independents with different '\n 'event shapes not supported.')\n else:\n raise ValueError('Event shapes do not match.')\n else:\n p_event_shape_tensor = p.event_shape_tensor()\n q_event_shape_tensor = q.event_shape_tensor()\n # NOTE: We could optimize by passing the event_shape_tensor of p and q\n # to a.event_shape_tensor() and b.event_shape_tensor().\n a_event_shape_tensor = a.event_shape_tensor()\n b_event_shape_tensor = b.event_shape_tensor()\n with tf.control_dependencies(\n [\n assert_util.assert_equal(\n a_event_shape_tensor, b_event_shape_tensor,\n message='Event shapes do not match.'),\n assert_util.assert_equal(\n p_event_shape_tensor, q_event_shape_tensor,\n message='Event shapes do not match.'),\n ]):\n num_reduce_dims = (\n ps.rank_from_shape(\n a_event_shape_tensor, a.event_shape) -\n ps.rank_from_shape(\n p_event_shape_tensor, p.event_shape))\n reduce_dims = ps.range(-num_reduce_dims, 0, 1)\n return tf.reduce_sum(\n kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)\n\n\n@log_prob_ratio.RegisterLogProbRatio(_Independent)\ndef _independent_log_prob_ratio(p, x, q, y, name=None):\n \"\"\"Sum-of-diffs log(p(x)/q(y)) for `Independent`s.\"\"\"\n with tf.name_scope(name or 'independent_log_prob_ratio'):\n checks = []\n if p.validate_args or q.validate_args:\n checks.append(tf.debugging.assert_equal(\n p.reinterpreted_batch_ndims, q.reinterpreted_batch_ndims))\n if p._experimental_use_kahan_sum or q._experimental_use_kahan_sum: # pylint: disable=protected-access\n sum_fn = lambda x, axis: tfp_math.reduce_kahan_sum(x, axis).total\n else:\n sum_fn = tf.reduce_sum\n with tf.control_dependencies(checks):\n return sum_fn(\n log_prob_ratio.log_prob_ratio(p.distribution, x, q.distribution, y),\n axis=-1 - ps.range(p.reinterpreted_batch_ndims))\n",
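A small self-contained check of the `reduce_sum` semantics described above, using the public `tfd.Independent` API:

```python
# Sketch: Independent absorbs rightmost batch dims into the event, so its
# log_prob is the base distribution's log_prob summed over those dims.
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

base = tfd.Normal(loc=tf.zeros([3, 2]), scale=tf.ones([3, 2]))
ind = tfd.Independent(base, reinterpreted_batch_ndims=1)

print(ind.batch_shape)  # (3,)
print(ind.event_shape)  # (2,)

x = tf.random.normal([3, 2])
manual = tf.reduce_sum(base.log_prob(x), axis=-1)
np.testing.assert_allclose(ind.log_prob(x).numpy(), manual.numpy(), rtol=1e-6)
```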
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Implements the Hager-Zhang inexact line search algorithm.\n\nLine searches are a central component for many optimization algorithms (e.g.\nBFGS, conjugate gradient etc). Most of the sophisticated line search methods\naim to find a step length in a given search direction so that the step length\nsatisfies the\n[Wolfe conditions](https://en.wikipedia.org/wiki/Wolfe_conditions).\n[Hager-Zhang 2006](https://epubs.siam.org/doi/abs/10.1137/030601880)\nalgorithm is a refinement of the commonly used\n[More-Thuente](https://dl.acm.org/citation.cfm?id=192132) algorithm.\n\nThis module implements the Hager-Zhang algorithm.\n\"\"\"\n\nimport collections\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl\n\n__all__ = [\n 'hager_zhang',\n]\n\n\ndef _machine_eps(dtype):\n \"\"\"Returns the machine epsilon for the supplied dtype.\"\"\"\n dtype = dtype_util.as_numpy_dtype(tf.as_dtype(dtype))\n return np.finfo(dtype).eps\n\n\nHagerZhangLineSearchResult = collections.namedtuple(\n 'HagerZhangLineSearchResults', [\n 'converged', # Whether a point satisfying Wolfe/Approx wolfe was found.\n 'failed', # Whether the line search failed. It can fail if either the\n # objective function or the gradient are not finite at\n # an evaluation point.\n 'func_evals', # Number of function evaluations made.\n 'iterations', # Number of line search iterations made.\n 'left', # The left end point of the final bracketing interval.\n # If converged is True, it is equal to `right`.\n # Otherwise, it corresponds to the last interval computed.\n 'right' # The right end point of the final bracketing interval.\n # If converged is True, it is equal to `left`.\n # Otherwise, it corresponds to the last interval computed.\n ])\n\n\ndef hager_zhang(value_and_gradients_function,\n initial_step_size=None,\n value_at_initial_step=None,\n value_at_zero=None,\n converged=None,\n threshold_use_approximate_wolfe_condition=1e-6,\n shrinkage_param=0.66,\n expansion_param=5.0,\n sufficient_decrease_param=0.1,\n curvature_param=0.9,\n max_iterations=50,\n name=None):\n \"\"\"The Hager Zhang line search algorithm.\n\n Performs an inexact line search based on the algorithm of\n [Hager and Zhang (2006)][2].\n The univariate objective function `value_and_gradients_function` is typically\n generated by projecting a multivariate objective function along a search\n direction. Suppose the multivariate function to be minimized is\n `g(x1,x2, .. xn)`. Let (d1, d2, ..., dn) be the direction along which we wish\n to perform a line search. 
Then the projected univariate function to be used\n for line search is\n\n ```None\n f(a) = g(x1 + d1 * a, x2 + d2 * a, ..., xn + dn * a)\n ```\n\n The directional derivative along (d1, d2, ..., dn) is needed for this\n procedure. This also corresponds to the derivative of the projected function\n `f(a)` with respect to `a`. Note that this derivative must be negative for\n `a = 0` if the direction is a descent direction.\n\n The usual stopping criteria for the line search is the satisfaction of the\n (weak) Wolfe conditions. For details of the Wolfe conditions, see\n ref. [3]. On a finite precision machine, the exact Wolfe conditions can\n be difficult to satisfy when one is very close to the minimum and as argued\n by [Hager and Zhang (2005)][1], one can only expect the minimum to be\n determined within square root of machine precision. To improve the situation,\n they propose to replace the Wolfe conditions with an approximate version\n depending on the derivative of the function which is applied only when one\n is very close to the minimum. The following algorithm implements this\n enhanced scheme.\n\n ### Usage:\n\n Primary use of line search methods is as an internal component of a class of\n optimization algorithms (called line search based methods as opposed to\n trust region methods). Hence, the end user will typically not want to access\n line search directly. In particular, inexact line search should not be\n confused with a univariate minimization method. The stopping criteria of line\n search is the satisfaction of Wolfe conditions and not the discovery of the\n minimum of the function.\n\n With this caveat in mind, the following example illustrates the standalone\n usage of the line search.\n\n ```python\n # Define value and gradient namedtuple\n ValueAndGradient = namedtuple('ValueAndGradient', ['x', 'f', 'df'])\n # Define a quadratic target with minimum at 1.3.\n def value_and_gradients_function(x):\n return ValueAndGradient(x=x, f=(x - 1.3) ** 2, df=2 * (x-1.3))\n # Set initial step size.\n step_size = tf.constant(0.1)\n ls_result = tfp.optimizer.linesearch.hager_zhang(\n value_and_gradients_function, initial_step_size=step_size)\n # Evaluate the results.\n with tf.Session() as session:\n results = session.run(ls_result)\n # Ensure convergence.\n assert results.converged\n # If the line search converged, the left and the right ends of the\n # bracketing interval are identical.\n assert results.left.x == results.right.x\n # Print the number of evaluations and the final step size.\n print (\"Final Step Size: %f, Evaluations: %d\" % (results.left.x,\n results.func_evals))\n ```\n\n ### References:\n [1]: William Hager, Hongchao Zhang. A new conjugate gradient method with\n guaranteed descent and an efficient line search. SIAM J. Optim., Vol 16. 1,\n pp. 170-172. 2005.\n https://www.math.lsu.edu/~hozhang/papers/cg_descent.pdf\n\n [2]: William Hager, Hongchao Zhang. Algorithm 851: CG_DESCENT, a conjugate\n gradient method with guaranteed descent. ACM Transactions on Mathematical\n Software, Vol 32., 1, pp. 113-137. 2006.\n http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf\n\n [3]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in\n Operations Research. pp 33-36. 
2006\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n correspond to scalar tensors of real dtype containing the point at which\n the function was evaluated, the value of the function, and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n input points, function values, and derivatives at those input points.\n initial_step_size: (Optional) Scalar positive `Tensor` of real dtype, or\n a tensor of shape [n] in batching mode. The initial value (or values) to\n try to bracket the minimum. Default is `1.` as a float32.\n Note that this point need not necessarily bracket the minimum for the line\n search to work correctly but the supplied value must be greater than 0.\n A good initial value will make the search converge faster.\n value_at_initial_step: (Optional) The full return value of evaluating\n value_and_gradients_function at initial_step_size, i.e. a namedtuple with\n 'x', 'f', 'df', if already known by the caller. If supplied the value of\n `initial_step_size` will be ignored, otherwise the tuple will be computed\n by evaluating value_and_gradients_function.\n value_at_zero: (Optional) The full return value of\n value_and_gradients_function at `0.`, i.e. a namedtuple with\n 'x', 'f', 'df', if already known by the caller. If not supplied the tuple\n will be computed by evaluating value_and_gradients_function.\n converged: (Optional) In batching mode a tensor of shape [n], indicating\n batch members which have already converged and no further search should\n be performed. These batch members are also reported as converged in the\n output, and both their `left` and `right` are set to the\n `value_at_initial_step`.\n threshold_use_approximate_wolfe_condition: Scalar positive `Tensor`\n of real dtype. Corresponds to the parameter 'epsilon' in\n [Hager and Zhang (2006)][2]. Used to estimate the\n threshold at which the line search switches to approximate Wolfe\n conditions.\n shrinkage_param: Scalar positive Tensor of real dtype. Must be less than\n `1.`. Corresponds to the parameter `gamma` in\n [Hager and Zhang (2006)][2].\n If the secant**2 step does not shrink the bracketing interval by this\n proportion, a bisection step is performed to reduce the interval width.\n expansion_param: Scalar positive `Tensor` of real dtype. Must be greater\n than `1.`. Used to expand the initial interval in case it does not bracket\n a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. 
Corresponds to `delta` in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n max_iterations: Positive scalar `Tensor` of integral dtype or None. The\n maximum number of iterations to perform in the line search. The number of\n iterations used to bracket the minimum are also counted against this\n parameter.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'hager_zhang' is used.\n\n Returns:\n results: A namedtuple containing the following attributes.\n converged: Boolean `Tensor` of shape [n]. Whether a point satisfying\n Wolfe/Approx wolfe was found.\n failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.\n if either the objective function or the gradient are not finite at\n an evaluation point.\n iterations: int32 `Tensor` of shape [n]. Number of line search iterations.\n func_evals: Scalar int32 `Tensor`. Number of function evaluations made.\n left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the final bracketing interval. Values are\n equal to those of `right` on batch members where converged is True.\n Otherwise, it corresponds to the last interval computed.\n right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the final bracketing interval. Values are\n equal to those of `left` on batch members where converged is True.\n Otherwise, it corresponds to the last interval computed.\n \"\"\"\n with tf.name_scope(name or 'hager_zhang'):\n val_0, val_initial, f_lim, prepare_evals = _prepare_args(\n value_and_gradients_function,\n initial_step_size,\n value_at_initial_step,\n value_at_zero,\n threshold_use_approximate_wolfe_condition)\n\n valid_inputs = (hzl.is_finite(val_0) & (val_0.df < 0) &\n tf.math.is_finite(val_initial.x) & (val_initial.x > 0))\n\n if converged is None:\n init_converged = tf.zeros_like(valid_inputs) # i.e. 
all false.\n else:\n init_converged = tf.convert_to_tensor(converged)\n\n failed = ~init_converged & ~valid_inputs\n\n init_interval = HagerZhangLineSearchResult(\n converged=init_converged,\n failed=failed,\n func_evals=prepare_evals,\n iterations=tf.zeros_like(valid_inputs, dtype=tf.int32),\n left=val_0,\n right=hzl.val_where(init_converged, val_0, val_initial))\n\n def _apply_bracket_and_search():\n \"\"\"Bracketing and searching to do for valid inputs.\"\"\"\n return _bracket_and_search(\n value_and_gradients_function, init_interval, f_lim, max_iterations,\n shrinkage_param, expansion_param, sufficient_decrease_param,\n curvature_param)\n\n init_active = ~init_interval.failed & ~init_interval.converged\n return prefer_static.cond(\n tf.reduce_any(init_active),\n _apply_bracket_and_search,\n lambda: init_interval)\n\n\n_LineSearchInnerResult = collections.namedtuple('_LineSearchInnerResult', [\n 'iteration',\n 'found_wolfe',\n 'failed',\n 'num_evals',\n 'left',\n 'right'])\n\n\ndef _bracket_and_search(\n value_and_gradients_function,\n init_interval,\n f_lim,\n max_iterations,\n shrinkage_param,\n expansion_param,\n sufficient_decrease_param,\n curvature_param):\n \"\"\"Brackets the minimum and performs a line search.\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n correspond to scalar tensors of real dtype containing the point at which\n the function was evaluated, the value of the function, and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n input points, function values, and derivatives at those input points.\n init_interval: Instance of `HagerZhangLineSearchResults` containing\n the initial line search interval. The gradient of init_interval.left must\n be negative (i.e. must be a descent direction), while init_interval.right\n must be positive and finite.\n f_lim: Scalar `Tensor` of float dtype.\n max_iterations: Positive scalar `Tensor` of integral dtype. The maximum\n number of iterations to perform in the line search. The number of\n iterations used to bracket the minimum are also counted against this\n parameter.\n shrinkage_param: Scalar positive Tensor of real dtype. Must be less than\n `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].\n expansion_param: Scalar positive `Tensor` of real dtype. Must be greater\n than `1.`. Used to expand the initial interval in case it does not bracket\n a minimum. Corresponds to `rho` in [Hager and Zhang (2006)][2].\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. 
Corresponds to `delta` in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n\n Returns:\n A namedtuple containing the following fields.\n converged: Boolean `Tensor` of shape [n]. Whether a point satisfying\n Wolfe/Approx wolfe was found.\n failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.\n if either the objective function or the gradient are not finite at\n an evaluation point.\n iterations: int32 `Tensor` of shape [n]. Number of line search iterations.\n func_evals: Scalar int32 `Tensor`. Number of function evaluations made.\n left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the updated bracketing interval.\n right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the updated bracketing interval.\n \"\"\"\n bracket_result = hzl.bracket(value_and_gradients_function, init_interval,\n f_lim, max_iterations, expansion_param)\n\n converged = init_interval.converged | _very_close(\n bracket_result.left.x, bracket_result.right.x)\n\n # We fail if we have not yet converged but already exhausted all iterations.\n exhausted_iterations = ~converged & (\n bracket_result.iteration >= max_iterations)\n\n line_search_args = HagerZhangLineSearchResult(\n converged=converged,\n failed=bracket_result.failed | exhausted_iterations,\n iterations=bracket_result.iteration,\n func_evals=bracket_result.num_evals,\n left=bracket_result.left,\n right=bracket_result.right)\n\n return _line_search_after_bracketing(\n value_and_gradients_function, line_search_args, init_interval.left,\n f_lim, max_iterations, sufficient_decrease_param, curvature_param,\n shrinkage_param)\n\n\ndef _line_search_after_bracketing(\n value_and_gradients_function,\n search_interval,\n val_0,\n f_lim,\n max_iterations,\n sufficient_decrease_param,\n curvature_param,\n shrinkage_param):\n \"\"\"The main loop of line search after the minimum has been bracketed.\n\n Args:\n value_and_gradients_function: A Python callable that accepts a real scalar\n tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n correspond to scalar tensors of real dtype containing the point at which\n the function was evaluated, the value of the function, and its\n derivative at that point. The other namedtuple fields, if present,\n should be tensors or sequences (possibly nested) of tensors.\n In usual optimization application, this function would be generated by\n projecting the multivariate objective function along some specific\n direction. The direction is determined by some other procedure but should\n be a descent direction (i.e. the derivative of the projected univariate\n function must be negative at 0.).\n Alternatively, the function may represent the batching of `n` such line\n functions (e.g. projecting a single multivariate objective function along\n `n` distinct directions at once) accepting n points as input, i.e. a\n tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n namedtuple should each be a tensor of shape [n], with the corresponding\n input points, function values, and derivatives at those input points.\n search_interval: Instance of `HagerZhangLineSearchResults` containing\n the current line search interval.\n val_0: A namedtuple as returned by value_and_gradients_function evaluated\n at `0.`. The gradient must be negative (i.e. 
must be a descent direction).\n f_lim: Scalar `Tensor` of float dtype.\n max_iterations: Positive scalar `Tensor` of integral dtype. The maximum\n number of iterations to perform in the line search. The number of\n iterations used to bracket the minimum is also counted against this\n parameter.\n sufficient_decrease_param: Positive scalar `Tensor` of real dtype.\n Bounded above by the curvature param. Corresponds to `delta` in the\n terminology of [Hager and Zhang (2006)][2].\n curvature_param: Positive scalar `Tensor` of real dtype. Bounded above\n by `1.`. Corresponds to 'sigma' in the terminology of\n [Hager and Zhang (2006)][2].\n shrinkage_param: Scalar positive Tensor of real dtype. Must be less than\n `1.`. Corresponds to the parameter `gamma` in [Hager and Zhang (2006)][2].\n\n Returns:\n A namedtuple containing the following fields.\n converged: Boolean `Tensor` of shape [n]. Whether a point satisfying\n Wolfe/Approx wolfe was found.\n failed: Boolean `Tensor` of shape [n]. Whether line search failed e.g.\n if either the objective function or the gradient are not finite at\n an evaluation point.\n iterations: int32 `Tensor` of shape [n]. Number of line search iterations.\n func_evals: Scalar int32 `Tensor`. Number of function evaluations made.\n left: A namedtuple, as returned by value_and_gradients_function,\n of the left end point of the updated bracketing interval.\n right: A namedtuple, as returned by value_and_gradients_function,\n of the right end point of the updated bracketing interval.\n \"\"\"\n\n def _loop_cond(curr_interval):\n \"\"\"Loop condition.\"\"\"\n active = ~(curr_interval.converged | curr_interval.failed)\n return tf.reduce_any((curr_interval.iterations < max_iterations) & active)\n\n def _loop_body(curr_interval):\n \"\"\"The loop body.\"\"\"\n active = ~(curr_interval.converged | curr_interval.failed)\n # TODO(b/208441613): Skip updates for batch members that are not active?\n secant2_raw_result = hzl.secant2(\n value_and_gradients_function, val_0, curr_interval, f_lim,\n sufficient_decrease_param, curvature_param)\n secant2_result = HagerZhangLineSearchResult(\n ## TODO(b/208441613): `& ~curr_interval.failed` should not be needed.\n converged=secant2_raw_result.converged & ~curr_interval.failed,\n ## TODO(b/208441613): `| curr_interval.failed` should not be needed.\n failed=secant2_raw_result.failed | curr_interval.failed,\n iterations=curr_interval.iterations + tf.cast(active, tf.int32),\n func_evals=secant2_raw_result.num_evals,\n left=secant2_raw_result.left,\n right=secant2_raw_result.right)\n\n should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed)\n\n def _do_check_shrinkage():\n \"\"\"Check if the interval has shrunk enough.\"\"\"\n old_width = curr_interval.right.x - curr_interval.left.x\n new_width = secant2_result.right.x - secant2_result.left.x\n sufficient_shrinkage = new_width < old_width * shrinkage_param\n func_is_flat = (\n _very_close(curr_interval.left.f, curr_interval.right.f) &\n _very_close(secant2_result.left.f, secant2_result.right.f))\n\n new_converged = (\n should_check_shrinkage & sufficient_shrinkage & func_is_flat)\n needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage\n\n inner_bisect_args = secant2_result._replace(\n converged=secant2_result.converged | new_converged)\n\n def _apply_inner_bisect():\n return _line_search_inner_bisection(\n value_and_gradients_function, inner_bisect_args,\n needs_inner_bisect, f_lim)\n\n return prefer_static.cond(\n tf.reduce_any(needs_inner_bisect),\n 
_apply_inner_bisect,\n          lambda: inner_bisect_args)\n\n    next_args = prefer_static.cond(\n        tf.reduce_any(should_check_shrinkage),\n        _do_check_shrinkage,\n        lambda: secant2_result)\n\n    interval_shrunk = (\n        ~next_args.failed & _very_close(next_args.left.x, next_args.right.x))\n    return [next_args._replace(converged=next_args.converged | interval_shrunk)]\n\n  return tf.while_loop(\n      cond=_loop_cond,\n      body=_loop_body,\n      loop_vars=[search_interval],\n      parallel_iterations=1)[0]\n\n\ndef _line_search_inner_bisection(\n    value_and_gradients_function,\n    search_interval,\n    active,\n    f_lim):\n  \"\"\"Performs bisection and updates the interval.\"\"\"\n  midpoint = (search_interval.left.x + search_interval.right.x) / 2\n  val_mid = value_and_gradients_function(midpoint)\n  is_valid_mid = hzl.is_finite(val_mid)\n\n  still_active = active & is_valid_mid\n  new_failed = active & ~is_valid_mid\n  next_interval = search_interval._replace(\n      failed=search_interval.failed | new_failed,\n      func_evals=search_interval.func_evals + 1)\n\n  def _apply_update():\n    update_result = hzl.update(\n        value_and_gradients_function, next_interval.left, next_interval.right,\n        val_mid, f_lim, active=still_active)\n    return HagerZhangLineSearchResult(\n        converged=next_interval.converged,\n        failed=next_interval.failed | update_result.failed,\n        iterations=next_interval.iterations + update_result.iteration,\n        func_evals=next_interval.func_evals + update_result.num_evals,\n        left=update_result.left,\n        right=update_result.right)\n\n  return prefer_static.cond(\n      tf.reduce_any(still_active), _apply_update, lambda: next_interval)\n\n\ndef _prepare_args(value_and_gradients_function,\n                  initial_step_size,\n                  val_initial,\n                  val_0,\n                  approximate_wolfe_threshold):\n  \"\"\"Prepares the arguments for the line search initialization.\n\n  Args:\n    value_and_gradients_function: A Python callable that accepts a real scalar\n      tensor and returns a namedtuple with the fields 'x', 'f', and 'df' that\n      correspond to scalar tensors of real dtype containing the point at which\n      the function was evaluated, the value of the function, and its\n      derivative at that point. The other namedtuple fields, if present,\n      should be tensors or sequences (possibly nested) of tensors.\n      In usual optimization application, this function would be generated by\n      projecting the multivariate objective function along some specific\n      direction. The direction is determined by some other procedure but should\n      be a descent direction (i.e. the derivative of the projected univariate\n      function must be negative at 0.).\n      Alternatively, the function may represent the batching of `n` such line\n      functions (e.g. projecting a single multivariate objective function along\n      `n` distinct directions at once) accepting n points as input, i.e. a\n      tensor of shape [n], and the fields 'x', 'f' and 'df' in the returned\n      namedtuple should each be a tensor of shape [n], with the corresponding\n      input points, function values, and derivatives at those input points.\n    initial_step_size: Scalar positive `Tensor` of real dtype, or a tensor of\n      shape [n] in batching mode. The initial value (or values) to try to\n      bracket the minimum. Default is `1.` as a float32.\n      Note that this point need not necessarily bracket the minimum for the line\n      search to work correctly but the supplied value must be greater than 0.\n      A good initial value will make the search converge faster.\n    val_initial: The full return value of evaluating\n      value_and_gradients_function at initial_step_size, i.e. 
a namedtuple with\n 'x', 'f', 'df', if already known by the caller. If not None the value of\n `initial_step_size` will be ignored, otherwise the tuple will be computed\n by evaluating value_and_gradients_function.\n val_0: The full return value of value_and_gradients_function at `0.`, i.e.\n a namedtuple with 'x', 'f', 'df', if already known by the caller. If None\n the tuple will be computed by evaluating value_and_gradients_function.\n approximate_wolfe_threshold: Scalar positive `Tensor` of\n real dtype. Corresponds to the parameter 'epsilon' in\n [Hager and Zhang (2006)][2]. Used to estimate the\n threshold at which the line search switches to approximate Wolfe\n conditions.\n\n Returns:\n left: A namedtuple, as returned by value_and_gradients_function,\n containing the value and derivative of the function at `0.`.\n val_initial: A namedtuple, as returned by value_and_gradients_function,\n containing the value and derivative of the function at\n `initial_step_size`.\n f_lim: Real `Tensor` of shape [n]. The function value threshold for\n the approximate Wolfe conditions to be checked.\n eval_count: Scalar int32 `Tensor`. The number of target function\n evaluations made by this function.\n \"\"\"\n eval_count = 0\n if val_initial is None:\n if initial_step_size is not None:\n initial_step_size = tf.convert_to_tensor(initial_step_size)\n else:\n initial_step_size = np.float32(1.)\n val_initial = value_and_gradients_function(initial_step_size)\n eval_count += 1\n\n if val_0 is None:\n x_0 = tf.zeros_like(val_initial.x)\n val_0 = value_and_gradients_function(x_0)\n eval_count += 1\n\n f_lim = val_0.f + (approximate_wolfe_threshold * tf.math.abs(val_0.f))\n return val_0, val_initial, f_lim, tf.convert_to_tensor(eval_count)\n\n\ndef _very_close(x, y):\n return tf.math.nextafter(x, y) >= y\n\n\ndef _to_str(x):\n \"\"\"Converts a bool tensor to a string with True/False values.\"\"\"\n x = tf.convert_to_tensor(x)\n if x.dtype == tf.bool:\n return tf.where(x, 'True', 'False')\n return x\n\n\n# A convenience function useful while debugging in the graph mode.\ndef _print(pass_through_tensor, values):\n \"\"\"Wrapper for tf.Print which supports lists and namedtuples for printing.\"\"\"\n flat_values = []\n for value in values:\n # Checks if it is a namedtuple.\n if hasattr(value, '_fields'):\n for field in value._fields:\n flat_values.extend([field, _to_str(getattr(value, field))])\n continue\n if isinstance(value, (list, tuple)):\n for v in value:\n flat_values.append(_to_str(v))\n continue\n flat_values.append(_to_str(value))\n return tf.Print(pass_through_tensor, flat_values)\n",
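For orientation, here is a minimal sketch of how the bracketing-and-secant machinery above is typically driven, assuming TensorFlow Probability is installed and this file is the one exposed publicly as `tfp.optimizer.linesearch.hager_zhang_line_search`. The quadratic objective and the `ValueAndGradient` namedtuple are illustrative, not part of the file:

```python
# Minimal usage sketch (assumption: the public entry point for the file above
# is tfp.optimizer.linesearch.hager_zhang_line_search).
import collections

import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

# The search expects a callable returning a namedtuple with 'x', 'f', 'df'.
ValueAndGradient = collections.namedtuple('ValueAndGradient', ['x', 'f', 'df'])


def fdf(x):
  # f(x) = (x - 1.3)^2 with its analytic derivative; df(0) = -2.6 < 0, so the
  # descent-direction precondition in the docstrings above is satisfied.
  return ValueAndGradient(x=x, f=tf.square(x - 1.3), df=2. * (x - 1.3))


result = tfp.optimizer.linesearch.hager_zhang_line_search(
    fdf, initial_step_size=tf.constant(0.1))
print(result.converged)   # Whether a Wolfe/approx-Wolfe point was found.
print(result.left.x)      # Accepted step; should land near the minimum at 1.3.
print(result.func_evals)  # Objective evaluations spent on the search.
```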
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Batch Norm bijector.\"\"\"\n\n\n# Dependency imports\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\n\n\n__all__ = [\n 'BatchNormalization',\n]\n\n\ndef _undo_batch_normalization(x,\n mean,\n variance,\n offset,\n scale,\n variance_epsilon,\n name=None):\n r\"\"\"Inverse of tf.nn.batch_normalization.\n\n Args:\n x: Input `Tensor` of arbitrary dimensionality.\n mean: A mean `Tensor`.\n variance: A variance `Tensor`.\n offset: An offset `Tensor`, often denoted `beta` in equations, or\n None. If present, will be added to the normalized tensor.\n scale: A scale `Tensor`, often denoted `gamma` in equations, or\n `None`. If present, the scale is applied to the normalized tensor.\n variance_epsilon: A small `float` added to the minibatch `variance` to\n prevent dividing by zero.\n name: A name for this operation (optional).\n\n Returns:\n batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.\n \"\"\"\n with tf.name_scope(name or 'undo_batch_normalization'):\n # inv = tf.rsqrt(variance + variance_epsilon)\n # if scale is not None:\n # inv *= scale\n # return x * inv + (\n # offset - mean * inv if offset is not None else -mean * inv)\n rescale = tf.sqrt(variance + variance_epsilon)\n if scale is not None:\n rescale = rescale / scale\n batch_unnormalized = x * rescale + (\n mean - offset * rescale if offset is not None else mean)\n return batch_unnormalized\n\n\nclass BatchNormalization(bijector.Bijector):\n \"\"\"Compute `Y = g(X) s.t. X = g^-1(Y) = (Y - mean(Y)) / std(Y)`.\n\n Applies Batch Normalization [(Ioffe and Szegedy, 2015)][1] to samples from a\n data distribution. This can be used to stabilize training of normalizing\n flows ([Papamakarios et al., 2016][3]; [Dinh et al., 2017][2])\n\n When training Deep Neural Networks (DNNs), it is common practice to\n normalize or whiten features by shifting them to have zero mean and\n scaling them to have unit variance.\n\n The `inverse()` method of the `BatchNormalization` bijector, which is used in\n the log-likelihood computation of data samples, implements the normalization\n procedure (shift-and-scale) using the mean and standard deviation of the\n current minibatch.\n\n Conversely, the `forward()` method of the bijector de-normalizes samples (e.g.\n `X*std(Y) + mean(Y)` with the running-average mean and standard deviation\n computed at training-time. 
De-normalization is useful for sampling.\n\n ```python\n distribution = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=[0.0], scale=[1.0]),\n bijector=tfb.BatchNormalization())\n\n y = tfd.Normal(loc=[1.0], scale_diag=[2.0]).sample(100) # ~ N(1, 2)\n x = distribution.bijector.inverse(y) # ~ N(0, 1)\n y2 = distribution.sample(100) # ~ N(1, 2)\n ```\n\n During training time, `BatchNormalization.inverse` and\n `BatchNormalization.forward` are not guaranteed to be inverses of each other\n because `inverse(y)` uses statistics of the current minibatch, while\n `forward(x)` uses running-average statistics accumulated from training. In\n other words, `BatchNormalization.inverse(BatchNormalization.forward(...))` and\n `BatchNormalization.forward(BatchNormalization.inverse(...))` will be\n identical when `training=False` but may be different when `training=True`.\n\n #### References\n\n [1]: Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating\n Deep Network Training by Reducing Internal Covariate Shift. In\n _International Conference on Machine Learning_, 2015.\n https://arxiv.org/abs/1502.03167\n\n [2]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation\n using Real NVP. In _International Conference on Learning\n Representations_, 2017. https://arxiv.org/abs/1605.08803\n\n [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked\n Autoregressive Flow for Density Estimation. In _Neural Information\n Processing Systems_, 2017. https://arxiv.org/abs/1705.07057\n \"\"\"\n\n def __init__(self,\n batchnorm_layer=None,\n training=True,\n validate_args=False,\n name='batch_normalization'):\n \"\"\"Instantiates the `BatchNormalization` bijector.\n\n Args:\n batchnorm_layer: `tf.layers.BatchNormalization` layer object. 
If `None`,\n defaults to a `tf.keras.layers.BatchNormalization` with\n `gamma_constraint=tf.nn.relu(x) + 1e-6)`.\n This ensures positivity of the scale variable.\n\n training: If True, updates running-average statistics during call to\n `inverse()`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n Raises:\n ValueError: If bn_layer is not an instance of\n `tf.layers.BatchNormalization`, or if it is specified with `renorm=True`\n or a virtual batch size.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n # Scale must be positive.\n g_constraint = lambda x: tf.nn.relu(x) + 1e-6\n self.batchnorm = batchnorm_layer or tf.keras.layers.BatchNormalization(\n gamma_constraint=g_constraint)\n self._validate_bn_layer(self.batchnorm)\n self._training = training\n if isinstance(self.batchnorm.axis, int):\n forward_min_event_ndims = 1\n else:\n forward_min_event_ndims = len(self.batchnorm.axis)\n super(BatchNormalization, self).__init__(\n forward_min_event_ndims=forward_min_event_ndims,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _parameter_properties(cls, dtype):\n return dict()\n\n def _validate_bn_layer(self, layer):\n \"\"\"Check for valid BatchNormalization layer.\n\n Args:\n layer: Instance of `tf.layers.BatchNormalization`.\n Raises:\n ValueError: If batchnorm_layer argument is not an instance of\n `tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or\n if `batchnorm_layer.virtual_batch_size` is specified.\n \"\"\"\n if (not isinstance(layer, tf.keras.layers.BatchNormalization) and\n not isinstance(layer, tf1.layers.BatchNormalization)):\n raise ValueError(\n 'batchnorm_layer must be an instance of '\n '`tf.keras.layers.BatchNormalization` or '\n '`tf.compat.v1.layers.BatchNormalization`. 
Got {}'.format(\n            type(layer)))\n    if layer.renorm:\n      raise ValueError(\n          '`BatchNormalization` Bijector does not support renormalization, '\n          'but `batchnorm_layer.renorm` is `True`.')\n    if layer.virtual_batch_size:\n      raise ValueError(\n          '`BatchNormalization` Bijector does not support virtual batch sizes, '\n          'but `batchnorm_layer.virtual_batch_size` is specified.')\n\n  def _get_broadcast_fn(self, x):\n    ndims = len(x.shape)\n    reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]\n    # Broadcasting is only necessary for single-axis batch norm where the axis\n    # is not the last dimension.\n    broadcast_shape = [1] * ndims\n    broadcast_shape[self.batchnorm.axis[0]] = x.shape[self.batchnorm.axis[0]]\n    def _broadcast(v):\n      if (v is not None and\n          len(v.shape) != ndims and\n          reduction_axes != list(range(ndims - 1))):\n        return tf.reshape(v, broadcast_shape)\n      return v\n    return _broadcast\n\n  def _normalize(self, y):\n    return self.batchnorm(y, training=self._training)\n\n  def _de_normalize(self, x):\n    # Uses the saved statistics.\n    if not self.batchnorm.built:\n      self.batchnorm.build(x.shape)\n    broadcast_fn = self._get_broadcast_fn(x)\n    mean = broadcast_fn(self.batchnorm.moving_mean)\n    variance = broadcast_fn(self.batchnorm.moving_variance)\n    beta = broadcast_fn(self.batchnorm.beta) if self.batchnorm.center else None\n    gamma = broadcast_fn(self.batchnorm.gamma) if self.batchnorm.scale else None\n    return _undo_batch_normalization(\n        x, mean, variance, beta, gamma, self.batchnorm.epsilon)\n\n  def _forward(self, x):\n    return self._de_normalize(x)\n\n  def _inverse(self, y):\n    return self._normalize(y)\n\n  def _forward_log_det_jacobian(self, x):\n    # Uses saved statistics to compute volume distortion.\n    return -self._inverse_log_det_jacobian(x, use_saved_statistics=True)\n\n  def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):\n    if not self.batchnorm.built:\n      # Create variables.\n      self.batchnorm.build(y.shape)\n\n    event_dims = self.batchnorm.axis\n    reduction_axes = [i for i in range(len(y.shape)) if i not in event_dims]\n\n    # At training-time, ildj is computed from the mean and log-variance across\n    # the current minibatch.\n    # We use multiplication instead of tf.where() to get easier broadcasting.\n    log_variance = tf.math.log(\n        tf.where(\n            tf.logical_or(use_saved_statistics, tf.logical_not(self._training)),\n            self.batchnorm.moving_variance,\n            tf.nn.moments(x=y, axes=reduction_axes, keepdims=True)[1]) +\n        self.batchnorm.epsilon)\n\n    # TODO(b/137216713): determine whether it's unsafe for the reduce_sums below\n    # to happen across all axes.\n    # `gamma` and `log Var(y)` reductions over event_dims.\n    # Log(total change in area from gamma term).\n    log_total_gamma = tf.reduce_sum(tf.math.log(self.batchnorm.gamma))\n\n    # Log(total change in area from log-variance term).\n    log_total_variance = tf.reduce_sum(log_variance)\n    # The ildj is scalar, as it does not depend on the values of x and is\n    # constant across minibatch elements.\n    return log_total_gamma - 0.5 * log_total_variance\n",
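The class docstring above already shows the bijector inside a `TransformedDistribution`; here is a runnable variant of that sketch, assuming TFP is installed and this class is exposed as `tfp.bijectors.BatchNormalization` (the base distribution and minibatch are illustrative):

```python
# Density-estimation vs. sampling directions of the batch-norm bijector.
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors
tfd = tfp.distributions

# Base N(0, 1) with a 2-dimensional event, pushed through batch norm.
flow = tfd.TransformedDistribution(
    distribution=tfd.Sample(tfd.Normal(0., 1.), sample_shape=[2]),
    bijector=tfb.BatchNormalization(training=True))

y = tf.random.normal([100, 2], mean=1., stddev=2.)  # stand-in minibatch

# log_prob runs inverse() (whitening with minibatch statistics) plus the
# inverse log-det-Jacobian; this is the direction used to fit a flow.
lp = flow.log_prob(y)

# sample() runs forward() (de-normalization with the moving averages).
x = flow.sample(10)
```

As the class docstring notes, `forward(inverse(y))` only round-trips exactly with `training=False`, because otherwise the two directions use different statistics.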
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Implementation of the Bayesian model class.\"\"\"\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\nfrom inference_gym.internal import ground_truth_encoding\n# Direct import for flatten_with_tuple_paths.\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'Model',\n]\n\n\ndef _populate_ground_truth(sample_transformations, module):\n \"\"\"Populates the ground truth values from a module.\n\n Args:\n sample_transformations: A dictionary of Python strings to\n `SampleTransformation`s.\n module: Python module from which to load the ground truth.\n\n Returns:\n sample_transformations: Same as the input `sample_transformations`, but with\n the ground truth values populated.\n \"\"\"\n sample_transformations = sample_transformations.copy()\n for name, sample_transformation in sample_transformations.items():\n flat_mean = []\n flat_sem = []\n flat_std = []\n flat_sestd = []\n for tuple_path, _ in nest.flatten_with_tuple_paths(\n sample_transformation.dtype):\n mean, sem, std, sestd = ground_truth_encoding.load_ground_truth_part(\n module, name, tuple_path)\n flat_mean.append(mean)\n flat_sem.append(sem)\n flat_std.append(std)\n flat_sestd.append(sestd)\n\n def _pack_or_none(flat_parts):\n if any(part is None for part in flat_parts):\n return None\n else:\n return tf.nest.pack_sequence_as(sample_transformation.dtype, flat_parts) # pylint: disable=cell-var-from-loop\n\n new_transformation = sample_transformation._replace(\n ground_truth_mean=_pack_or_none(flat_mean),\n ground_truth_mean_standard_error=_pack_or_none(flat_sem),\n ground_truth_standard_deviation=_pack_or_none(flat_std),\n ground_truth_standard_deviation_standard_error=_pack_or_none(\n flat_sestd),\n )\n sample_transformations[name] = new_transformation\n return sample_transformations\n\n\nclass Model:\n \"\"\"Base class for models in the Inference Gym.\n\n A model, at the minimum, describes its support and allows evaluating its\n un-normalized log density via the `unnormalized_log_prob` method.\n\n Given a Bayesian model conditioned on evidence, you can access the associated\n un-normalized density via the `unnormalized_log_prob` method.\n\n The dtype, shape, and constraints over the support are returned by the\n `dtype`, `event_shape`, and `default_event_space_bijector` properties. Note\n that `x` could be structured, in which case `dtype` and `shape` will be\n structured as well (parallel to the structure of `x`).\n `default_event_space_bijector` need not be structured, but could operate on\n the structured `x`. 
A generic way of constructing a random number that is\n within the event space of this model is to do:\n\n ```python\n model = LogisticRegression(...)\n unconstrained_values = tf.nest.map_structure(\n lambda d, s: tf.random.normal(s, dtype=d),\n model.dtype,\n model.event_shape,\n )\n constrained_values = tf.nest.map_structure_up_to(\n model.default_event_space_bijector,\n lambda b, v: b(v),\n model.default_event_space_bijector,\n unconstrained_values,\n )\n ```\n\n A model has two names. First, the `name` property is used for various name\n scopes inside the implementation of the model. Second, a pretty name which is\n meant to be suitable for a table inside a publication, accessed via the\n `__str__` method.\n\n Models come with associated sample transformations, which describe useful ways\n of looking at the samples from the posterior distribution. Each transformation\n optionally comes equipped with various ground truth values (computed\n analytically or via Monte Carlo averages). You can apply the transformations\n to samples from the model like so:\n\n ```python\n model = LogisticRegression(...)\n for name, sample_transformation in model.sample_transformations.items():\n transformed_samples = sample_transformation(samples)\n if sample_transformation.ground_truth_mean is not None:\n square_diff = tf.nest.map_structure(\n lambda gtm, sm: (gtm - tf.reduce_mean(sm, axis=0))**2,\n sample_transformation.ground_truth_mean,\n transformed_samples,\n )\n ```\n\n #### Examples\n\n A simple 2-variable model:\n\n ```python\n class SimpleModel(gym.targets.Model):\n\n def __init__(self):\n super(SimpleModel, self).__init__(\n default_event_space_bijector=tfb.Exp(),\n event_shape=[],\n dtype=tf.float32,\n name='simple_model',\n pretty_name='SimpleModel',\n sample_transformations=dict(\n identity=gym.targets.Model.SampleTransformation(\n fn=lambda x: x,\n pretty_name='Identity',\n ),),\n )\n\n def _unnormalized_log_prob(self, value):\n return 1. + value - tf.math.exp(value)\n ```\n\n We don't specify the ground truth values for the `identity` sample\n transformation for demonstration purposes, which you'd normally only do if\n they are not known analytically. See `GermanCreditNumericLogisticRegression`\n Bayesian model for an example of how to incorporate Monte-Carlo derived values\n for ground truth into a sample transformation.\n \"\"\"\n\n # Specify this to load the ground truths from this module.\n GROUND_TRUTH_MODULE = None\n\n def __init__(\n self,\n default_event_space_bijector,\n event_shape,\n dtype,\n name,\n pretty_name,\n sample_transformations,\n ):\n \"\"\"Constructs a Model.\n\n Args:\n default_event_space_bijector: A (nest of) bijectors that take\n unconstrained `R**n` tensors to the event space of the posterior.\n event_shape: A (nest of) shapes describing the samples from the posterior.\n dtype: A (nest of) dtypes describing the dtype of the posterior.\n name: Python `str` name prefixed to Ops created by this class.\n pretty_name: A Python `str`. 
The pretty name of this model.\n      sample_transformations: A dictionary of Python strings to\n        `SampleTransformation`s.\n    \"\"\"\n    self._default_event_space_bijector = default_event_space_bijector\n    self._event_shape = event_shape\n    self._dtype = dtype\n    self._name = name\n    self._pretty_name = pretty_name\n    if not isinstance(sample_transformations, collections.OrderedDict):\n      sample_transformations = collections.OrderedDict(\n          sorted(sample_transformations.items()))\n    if self.GROUND_TRUTH_MODULE is not None:\n      sample_transformations = _populate_ground_truth(\n          sample_transformations, module=self.GROUND_TRUTH_MODULE)\n    self._sample_transformations = sample_transformations\n\n  class SampleTransformation(\n      collections.namedtuple('SampleTransformation', [\n          'fn',\n          'pretty_name',\n          'ground_truth_mean',\n          'ground_truth_mean_standard_error',\n          'ground_truth_standard_deviation',\n          'ground_truth_standard_deviation_standard_error',\n          'dtype',\n      ])):\n    \"\"\"A transformation of samples of the outer `BayesianModel`.\n\n    Specifically, `E_{x~p}[f(x)]` for a model `p` and transformation `f`. The\n    model `p` is implicit, in that the `SampleTransformation` appears in the\n    `sample_transformations` field of that `BayesianModel`. The `f` is given as\n    `fn` so that candidate samples may be passed through it. The `fn` may close\n    over the parameters of `p`, and the `ground_truth_mean` will presumably\n    depend on `p` implicitly via some sampling process.\n\n    If the `ground_truth_mean` is estimated by sampling, then\n    `ground_truth_standard_deviation` and `ground_truth_mean_standard_error` are\n    related using the standard formula:\n    ```none\n    SEM = SD / sqrt(N)\n    ```\n    where `N` is the number of samples. `ground_truth_standard_deviation`\n    describes the distribution of `f(x)`, while\n    `ground_truth_mean_standard_error`\n    describes how accurately we know `ground_truth_mean`.\n\n    Attributes:\n      fn: Function that takes samples from the target and returns a (nest of)\n        `Tensor`. The returned `Tensor` must retain the leading non-event\n        dimensions.\n      pretty_name: Human readable name, suitable for a table in a paper.\n      ground_truth_mean: Ground truth value of this expectation. Can be `None`\n        if not available. Default: `None`.\n      ground_truth_mean_standard_error: Standard error of the ground truth mean.\n        Can be `None` if not available. Default: `None`.\n      ground_truth_standard_deviation: Standard deviation of samples transformed\n        by `fn`. Can be `None` if not available. Default: `None`.\n      ground_truth_standard_deviation_standard_error: Standard error of the\n        ground truth standard deviation. Can be `None` if not available.\n        Default: `None`.\n      dtype: Possibly nested dtype of the output of `fn`. 
Default: `tf.float32`.\n\n    #### Examples\n\n    An identity `fn` for a vector-valued target would look like:\n\n    ```python\n    fn = lambda x: x\n    ```\n    \"\"\"\n\n    __slots__ = ()\n\n    def __call__(self, value):\n      \"\"\"Returns `fn(value)`.\"\"\"\n      return self.fn(value)\n\n    def __str__(self):\n      \"\"\"The pretty name of this transformation.\"\"\"\n      return self.pretty_name\n\n  def _unnormalized_log_prob(self, value):\n    raise NotImplementedError('_unnormalized_log_prob is not implemented.')\n\n  def unnormalized_log_prob(self, value, name='unnormalized_log_prob'):\n    \"\"\"The un-normalized log density evaluated at a point.\n\n    This corresponds to the target distribution associated with the model, often\n    its posterior.\n\n    Args:\n      value: A (nest of) `Tensor` to evaluate the log density at.\n      name: Python `str` name prefixed to Ops created by this method.\n\n    Returns:\n      unnormalized_log_prob: A floating point `Tensor`.\n    \"\"\"\n    with tf.name_scope(self.name):\n      with tf.name_scope(name):\n        return self._unnormalized_log_prob(value)\n\n  @property\n  def default_event_space_bijector(self):\n    \"\"\"Bijector mapping the reals (R**n) to the event space of this model.\"\"\"\n    return self._default_event_space_bijector\n\n  @property\n  def event_shape(self):\n    \"\"\"Shape of a single sample as a `TensorShape`.\n\n    May be partially defined or unknown.\n\n    Returns:\n      event_shape: `TensorShape`, possibly unknown.\n    \"\"\"\n    return nest.map_structure_up_to(self.dtype, tf.TensorShape,\n                                    self._event_shape)\n\n  @property\n  def dtype(self):\n    \"\"\"The `DType` of `Tensor`s handled by this model.\"\"\"\n    return self._dtype\n\n  @property\n  def name(self):\n    \"\"\"Python `str` name prefixed to Ops created by this class.\"\"\"\n    return self._name\n\n  def __str__(self):\n    \"\"\"The pretty name of the model, suitable for a figure caption.\"\"\"\n    return self._pretty_name\n\n  @property\n  def sample_transformations(self):\n    \"\"\"A dictionary of names to `SampleTransformation`s.\"\"\"\n    return self._sample_transformations\n\n\nModel.SampleTransformation.__new__.__defaults__ = (None, None, None,\n                                                   None, tf.float32)\n"
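To make the `SimpleModel` example from the class docstring above concrete, here is a sketch of instantiating it and scoring a point mapped through `default_event_space_bijector`. It assumes TFP and the Inference Gym's TensorFlow backend are installed and importable as shown; the draw-and-score usage at the bottom follows the "generic way" described in the docstring:

```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from inference_gym import using_tensorflow as gym

tfb = tfp.bijectors


class SimpleModel(gym.targets.Model):
  # The small example model from the class docstring above.

  def __init__(self):
    super(SimpleModel, self).__init__(
        default_event_space_bijector=tfb.Exp(),
        event_shape=[],
        dtype=tf.float32,
        name='simple_model',
        pretty_name='SimpleModel',
        sample_transformations=dict(
            identity=gym.targets.Model.SampleTransformation(
                fn=lambda x: x,
                pretty_name='Identity',
            ),),
    )

  def _unnormalized_log_prob(self, value):
    return 1. + value - tf.math.exp(value)


model = SimpleModel()
# Draw unconstrained, constrain via the bijector, then score.
z = tf.random.normal(model.event_shape, dtype=model.dtype)
x = model.default_event_space_bijector(z)
print(model.unnormalized_log_prob(x))
print(model.sample_transformations['identity'](x))  # Identity transform.
```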
] | [
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.Variable",
"numpy.log",
"numpy.sqrt",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.eye",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.ones",
"numpy.array",
"tensorflow.compat.v2.nn.softplus",
"tensorflow.compat.v2.optimizers.Adam"
],
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.math.digamma",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.math.log"
],
[
"tensorflow.compat.v2.math.erf",
"numpy.sqrt",
"tensorflow.compat.v2.math.is_nan",
"tensorflow.compat.v2.math.ndtri",
"tensorflow.compat.v2.minimum",
"tensorflow.compat.v2.math.reciprocal_no_nan",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.math.square",
"tensorflow.compat.v2.reduce_sum",
"numpy.exp",
"tensorflow.compat.v2.while_loop",
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.math.xlogy",
"tensorflow.compat.v2.math.is_inf",
"tensorflow.compat.v2.math.lgamma",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.reduce_any",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.math.sign",
"tensorflow.compat.v2.reduce_all",
"numpy.log",
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.math.atan2",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.math.digamma",
"tensorflow.compat.v2.math.igammac",
"tensorflow.compat.v2.raw_ops.BroadcastGradientArgs",
"tensorflow.compat.v2.math.sqrt",
"numpy.array",
"tensorflow.compat.v2.math.equal",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.math.reciprocal",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones_like",
"tensorflow.compat.v2.math.erfc",
"tensorflow.compat.v2.math.igamma",
"tensorflow.compat.v2.maximum",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.stop_gradient",
"tensorflow.compat.v2.math.atan",
"tensorflow.compat.v2.math.polyval",
"tensorflow.compat.v2.math.expm1"
],
[
"tensorflow.compat.v2.math.reduce_max",
"tensorflow.compat.v2.math.is_nan",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.math.minimum",
"tensorflow.compat.v2.while_loop",
"tensorflow.compat.v2.math.is_inf",
"tensorflow.compat.v2.math.reduce_min",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.reduce_sum",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v1.where",
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.math.maximum",
"tensorflow.compat.v2.math.equal",
"tensorflow.compat.v2.concat"
],
[
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.linalg.matmul",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.eye",
"tensorflow.compat.v2.math.rsqrt"
],
[
"tensorflow.compat.v2.debugging.assert_equal",
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.python.util.deprecation.deprecated_arg_values",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.as_dtype",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.where",
"numpy.finfo",
"tensorflow.compat.v2.math.is_finite",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.reduce_any",
"tensorflow.compat.v2.math.nextafter",
"numpy.float32",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.Print",
"tensorflow.compat.v2.while_loop"
],
[
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.logical_not",
"tensorflow.compat.v2.keras.layers.BatchNormalization",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.nn.relu",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.nn.moments"
],
[
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.python.util.nest.flatten_with_tuple_paths",
"tensorflow.compat.v2.name_scope",
"tensorflow.python.util.nest.map_structure_up_to"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
}
] |
phgupta/XBOS | [
"1fea0b024d97ae142d97b3a94510403928ed44b7",
"1fea0b024d97ae142d97b3a94510403928ed44b7"
] | [
"services/occupancy/server.py",
"services/indoor_temperature_prediction/server.py"
] | [
"from concurrent import futures\nimport time\nimport grpc\nimport logging\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)\nimport occupancy_pb2\nimport occupancy_pb2_grpc\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\nimport os\nimport xbos_services_utils3 as utils\nimport datetime\nimport pytz\nimport numpy as np\nimport pandas as pd\nimport yaml\n\nDAYS_IN_WEEK = 7\nOCCUPANCY_DATA_PATH = os.environ[\"OCCUPANCY_DATA_PATH\"]\nOCCUPANCY_HOST_ADDRESS = os.environ[\"OCCUPANCY_HOST_ADDRESS\"]\n\ndef _get_occupancy_config(building, zone):\n occ_path = OCCUPANCY_DATA_PATH + \"/\" + building + \"/\" + zone + \".yml\"\n\n if os.path.exists(occ_path):\n with open(occ_path, \"r\") as f:\n try:\n config = yaml.load(f)\n except yaml.YAMLError:\n return None, \"yaml could not read file at: %s\" % occ_path\n else:\n return None, \"occupancy file could not be found. path: %s.\" % occ_path\n\n return config, None\n\n\ndef _get_week_occupancy(building, zone, date, interval):\n \"\"\"\n Gets the occupancy from the zone configuration file. Correctly Resamples the data according to interval\n :param date: The date for which we want to start the week. Timezone aware.\n :param interval: int:seconds. The interval/frequency of resampling.\n :return: pd.Series with time_series index in timezone of building.\n \"\"\"\n config, err = _get_occupancy_config(building, zone)\n if config is None:\n return None, err\n\n # Set the date to the controller timezone.\n building_date = date.astimezone(tz=pytz.timezone(config[\"tz\"]))\n weekday = building_date.weekday()\n\n list_occ_data = []\n\n occ_data = config[\"occupancy\"]\n\n # Note, we need to get a day before the start and after the end of the week to correctly resample due to timezones.\n for i in range(DAYS_IN_WEEK + 2):\n curr_weekday = (weekday + i - 1) % DAYS_IN_WEEK\n curr_day = building_date + datetime.timedelta(days=i - 1)\n\n curr_idx = []\n curr_occ = []\n\n date_occupancy = np.array(occ_data[curr_weekday])\n\n for interval_occupancy in date_occupancy:\n start, end, occ = interval_occupancy\n start = utils.combine_date_time(start, curr_day)\n\n occ = float(occ)\n curr_idx.append(start)\n curr_occ.append(occ)\n\n list_occ_data.append(pd.Series(index=curr_idx, data=curr_occ))\n\n series_occ = pd.concat(list_occ_data)\n\n series_occ = series_occ.tz_convert(date.tzinfo)\n\n # decrements in interval-steps till beginning of day of date.\n decremented_date = utils.decrement_to_start_of_day(date, interval)\n\n series_occ = utils.smart_resample(series_occ, decremented_date, decremented_date + datetime.timedelta(days=7),\n interval, \"pad\")\n\n return series_occ, None\n\n\ndef get_all_occ(building, zone, start, end, interval):\n \"\"\"\n Gets the occupancy of a zone from start to end in the given interval.\n :param building: string\n :param zone: string\n :param start: datetime. timezone aware\n :param end: datetime. timezone aware.\n :param interval: int:seconds. seconds_in_day % interval == 0\n :return:\n\n NOTE: If (end-start).total_seconds % interval != 0, then make new_end such that new_end < end and\n the condition is satisfied. 
New_end will also not be inclusive.\n    \"\"\"\n\n    first_seven_days, err = _get_week_occupancy(building, zone, start, interval)\n    if first_seven_days is None:\n        return None, err\n\n    first_seven_days_start = first_seven_days.index[0]\n    first_seven_days_end = first_seven_days_start + datetime.timedelta(days=DAYS_IN_WEEK)\n\n    if end < first_seven_days_end:\n        return first_seven_days[start:end][:-1], None\n\n    # get occupancy for the remaining days.\n    remaining_data = []\n\n    for i in range((end - first_seven_days_end).days + 1):\n        curr_offset = i % DAYS_IN_WEEK\n\n        curr_time = first_seven_days_end + datetime.timedelta(days=i)\n\n        curr_data = first_seven_days[first_seven_days_start + datetime.timedelta(days=curr_offset):\n                                     first_seven_days_start + datetime.timedelta(days=curr_offset + 1)][\n                    :int(24 * 60 * 60 / interval)]\n\n        curr_start_date = curr_time\n        curr_end_date = curr_start_date + datetime.timedelta(days=1)\n        date_range = pd.date_range(start=curr_start_date, end=curr_end_date, freq=str(interval) + \"S\")[:-1]\n        curr_data.index = date_range\n\n        remaining_data.append(curr_data)\n\n    occupancy_series = pd.concat([first_seven_days] + remaining_data)\n\n    return occupancy_series[start:end][:-1], None\n\n\ndef get_occupancy(request):\n    \"\"\"Returns occupancy data for a given request or None.\"\"\"\n    logging.info(\"received request: %s %s %s %s %s\", request.building, request.zone, request.start, request.end, request.window)\n    window_seconds = utils.get_window_in_sec(request.window)\n\n    request_length = [len(request.building), len(request.zone), request.start, request.end,\n                      window_seconds]\n\n    if any(v == 0 for v in request_length):\n        return None, \"invalid request, empty params\"\n    if request.start >= request.end:\n        return None, \"invalid request, start date is after end date.\"\n    if request.start < 0 or request.end < 0:\n        return None, \"invalid request, negative dates\"\n    if request.start + (window_seconds * 1e9) > request.end:\n        return None, \"invalid request, start date + window is greater than end date\"\n    if 60 * 60 * 24 % window_seconds != 0:\n        return None, \"window does not evenly divide a day (seconds_in_day % window != 0).\"\n\n    start_datetime = datetime.datetime.utcfromtimestamp(\n        float(request.start / 1e9)).replace(tzinfo=pytz.utc)\n    end_datetime = datetime.datetime.utcfromtimestamp(\n        float(request.end / 1e9)).replace(tzinfo=pytz.utc)\n\n    all_occupancy, err = get_all_occ(request.building, request.zone, start_datetime, end_datetime, window_seconds)\n    if all_occupancy is None:\n        return [occupancy_pb2.OccupancyPoint()], err\n\n    grpc_occ = []\n    for idx, row in all_occupancy.iteritems():\n        grpc_occ.append(\n            occupancy_pb2.OccupancyPoint(time=int(idx.timestamp() * 1e9), occupancy=row))\n    return grpc_occ, None\n    # return occupancy_pb2.OccupancyReply(occupancies=grpc_occ), None\n\n\nclass OccupancyServicer(occupancy_pb2_grpc.OccupancyServicer):\n    def __init__(self):\n        pass\n\n    def GetOccupancy(self, request, context):\n        \"\"\"A simple RPC.\n\n        Sends the occupancy for a given building and zone, within a duration (start, end), and a requested window.\n        An error is returned if there is no occupancy data for the given request.\n        \"\"\"\n        occupancy, error = get_occupancy(request)\n        if occupancy is None:\n            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n            context.set_details(error)\n            return occupancy_pb2.OccupancyPoint()\n        elif error is not None:\n            context.set_code(grpc.StatusCode.UNAVAILABLE)\n            context.set_details(error)\n\n        for occ in occupancy:\n            yield occ\n\n\ndef serve():\n    server = 
grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n occupancy_pb2_grpc.add_OccupancyServicer_to_server(OccupancyServicer(), server)\n server.add_insecure_port(OCCUPANCY_HOST_ADDRESS)\n logging.info(\"Serving on {0} with data path {1}\".format(OCCUPANCY_HOST_ADDRESS, OCCUPANCY_DATA_PATH))\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n serve()\n",
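The weekly-schedule expansion above leans on the project-specific `utils.smart_resample`; here is a self-contained pandas sketch of the same pad-resampling idea. The schedule values are hypothetical stand-ins for one day of the YAML config:

```python
import datetime

import pandas as pd
import pytz

tz = pytz.timezone("US/Pacific")
day = tz.localize(datetime.datetime(2019, 4, 1))

# (start_hour, occupancy) change-points for one day, shaped like one entry
# of the config's "occupancy" section.
schedule = [(0, 0.1), (8, 0.9), (18, 0.25)]

idx = [day + datetime.timedelta(hours=h) for h, _ in schedule]
series = pd.Series([occ for _, occ in schedule], index=idx)

# Resample onto a fixed grid (here 3600s) and pad forward, mirroring the
# "pad" mode handed to utils.smart_resample above.
interval = 3600
resampled = series.resample("%dS" % interval).ffill()
print(resampled.head())
```

Pad-filling is the right choice here because an occupancy schedule is a step function: each value holds until the next change-point.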
"import os, sys\n\nimport datetime\nimport pytz\nimport pandas as pd\nimport numpy as np\n\nfrom concurrent import futures\nimport time\nimport grpc\nimport logging\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)\n\nimport indoor_temperature_prediction_pb2\nimport indoor_temperature_prediction_pb2_grpc\n\nimport create_models as ctm\n\nimport xbos_services_getter as xsg\n\nHOST_ADDRESS = os.environ[\"INDOOR_TEMPERATURE_PREDICTION_HOST_ADDRESS\"]\n\n_INTERVAL = \"5m\" # minutes # TODO allow for getting multiples of 5. Prediction horizon.\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\nTHERMAL_MODELS = {}\n\nEND = datetime.datetime(year=2019, month=4, day=1).replace(\n tzinfo=pytz.utc) # datetime.datetime.utcnow().replace(tzinfo=pytz.utc) # TODO make environ var.\nSTART = END - datetime.timedelta(days=365)\n\n\ndef get_window_in_sec(s):\n \"\"\"Returns number of seconds in a given duration or zero if it fails.\n Supported durations are seconds (s), minutes (m), hours (h), and days(d).\"\"\"\n seconds_per_unit = {\"s\": 1, \"m\": 60, \"h\": 3600, \"d\": 86400}\n try:\n return int(float(s[:-1])) * seconds_per_unit[s[-1]]\n except:\n return 0\n\n\ndef initizalize():\n building_zone_names_stub = xsg.get_building_zone_names_stub()\n all_building_zone_names = xsg.get_all_buildings_zones(building_zone_names_stub)\n for building in all_building_zone_names.keys():\n logging.info(\"Initizalizing building:\", building)\n for zone in all_building_zone_names[building]:\n logging.info(\"Zone:\", zone)\n _, err = check_thermal_model(building, zone)\n if err is not None:\n logging.error(\"Error: \" + err)\n print(\"\")\n\n\ndef check_thermal_model(building, zone):\n if building not in THERMAL_MODELS or zone not in THERMAL_MODELS[building]:\n # TODO ERROR CHECK.\n thermal_model, column_order, err = training(building, zone, START, END)\n if err is not None:\n return None, err\n if building not in THERMAL_MODELS:\n THERMAL_MODELS[building] = {}\n THERMAL_MODELS[building][zone] = (thermal_model, column_order)\n\n return None, None\n\n\ndef training(building, zone, start, end):\n \"\"\"\n\n :param building: (str) building name\n :param zone: (str) zone name\n :param start: (datetime timezone aware)\n :param end: (datetime timezone aware)\n :return: Trained thermal model object.\n \"\"\"\n # TODO add more error checking here goddamn\n model, column_order, err = ctm.create_model(building=building,\n zone=zone,\n start=start,\n end=end,\n prediction_window=_INTERVAL,\n raw_data_granularity=\"1m\",\n train_ratio=1,\n is_second_order=True,\n use_occupancy=False,\n curr_action_timesteps=0,\n prev_action_timesteps=-1,\n method=\"OLS\",\n check_data=False) # change this as needed.\n if err is not None:\n return None, None, err\n return model, column_order, None\n\n\ndef get_error(request):\n \"\"\"Gets error for prediction + error = label.\n\n :return: error_reply, err\n \"\"\"\n logging.info(\"received request:\", request.building, request.zone, request.action, request.start, request.end, request.unit)\n\n request_length = [len(request.building), len(request.zone), request.start,\n request.end,\n len(request.unit)]\n\n if any(v == 0 for v in request_length):\n return None, \"invalid request, empty params\"\n if not (0 <= request.action <= 2):\n return None, \"invalid request, action is not between 0 and 2.\"\n if request.unit != \"F\":\n return None, \"invalid request, only support 'F' unit.\"\n\n # TODO Check if valid 
building/zone/temperature unit/zone, outside and indoor temperature (not none)\n\n    start = datetime.datetime.utcfromtimestamp(float(request.start / 1e9)).replace(\n        tzinfo=pytz.utc)\n    end = datetime.datetime.utcfromtimestamp(float(request.end / 1e9)).replace(\n        tzinfo=pytz.utc)\n\n    # checking if we have a thermal model, and training if necessary.\n    _, err = check_thermal_model(request.building, request.zone)\n    if err is not None:\n        return None, \"No valid Thermal Model. (\" + err + \")\"\n\n    train_X, train_y, _, _, err = ctm.get_train_test(building=request.building,\n                                                     zone=request.zone,\n                                                     start=start,\n                                                     end=end,\n                                                     prediction_window=_INTERVAL,\n                                                     raw_data_granularity=\"1m\",\n                                                     train_ratio=1,\n                                                     is_second_order=True,\n                                                     use_occupancy=False,\n                                                     curr_action_timesteps=0,\n                                                     prev_action_timesteps=-1,\n                                                     check_data=False)\n\n    if err is not None:\n        return None, err\n\n    thermal_model, column_order = THERMAL_MODELS[request.building][request.zone]\n    if request.action != -1:\n        action_filter = train_X[\"action\"] == request.action\n        train_X = train_X[action_filter]\n        train_y = train_y[action_filter]\n\n    if train_X.shape[0] == 0:\n        return None, \"Not enough data for given action to get error.\"\n\n    predictions_train = thermal_model.predict(train_X)\n    error = (train_y.values - predictions_train)\n\n    err_mean, err_var = np.mean(error), np.var(error)\n\n    error_reply = indoor_temperature_prediction_pb2.ErrorReply(\n        mean=err_mean,\n        var=err_var,\n        unit=\"F\")\n    return error_reply, None\n\n\ndef prediction(request):\n    \"\"\"Returns temperature prediction for a given request or None.\"\"\"\n\n    logging.info(\"received request: %s %s %s %s %s %s %s\", request.building, request.zone, request.current_time,\n                 request.indoor_temperature, request.outside_temperature, request.other_zone_temperatures,\n                 request.temperature_unit)\n\n    request_length = [len(request.building), len(request.zone), request.current_time,\n                      request.indoor_temperature, request.outside_temperature, request.other_zone_temperatures,\n                      len(request.temperature_unit)]\n\n    unit = \"F\" # Fahrenheit for now.\n\n    if any(v == 0 for v in request_length):\n        return None, \"invalid request, empty params\"\n    if not (0 <= request.action <= 2):\n        return None, \"Action is not between 0 and 2.\"\n\n    # TODO Check if valid building/zone/temperature unit/zone, outside and indoor temperature (not none)\n\n    current_time = datetime.datetime.utcfromtimestamp(float(request.current_time / 1e9)).replace(\n        tzinfo=pytz.utc)\n\n    # checking if we have a thermal model, and training if necessary.\n    _, err = check_thermal_model(request.building, request.zone)\n    if err is not None:\n        return None, \"No valid Thermal Model. 
(\" + err + \")\"\n thermal_model, column_order = THERMAL_MODELS[request.building][request.zone]\n data_point = {\n \"t_in\": request.indoor_temperature,\n \"action\": request.action,\n \"t_out\": request.outside_temperature,\n \"dt\": get_window_in_sec(_INTERVAL),\n \"t_prev\": request.previous_indoor_temperature # TODO t_last feature should be added to proto specs\n }\n\n for iter_zone, iter_temp in request.other_zone_temperatures.items():\n if iter_zone != request.zone:\n data_point[\"temperature_zone_\" + iter_zone] = iter_temp\n\n data_point = pd.DataFrame(data=[data_point], index=[current_time])[column_order]\n\n prediction = thermal_model.predict(data_point)\n\n prediction_reply = indoor_temperature_prediction_pb2.PredictedTemperatureReply(\n time=int(request.current_time + get_window_in_sec(_INTERVAL) * 1e9),\n temperature=prediction[0],\n unit=unit)\n return prediction_reply, None\n\n\nclass IndoorTemperaturePredictionServicer(indoor_temperature_prediction_pb2_grpc.IndoorTemperaturePredictionServicer):\n def __init__(self):\n pass\n\n def GetSecondOrderPrediction(self, request, context):\n \"\"\"A simple RPC.\n \"\"\"\n predicted_temperature, error = prediction(request)\n if predicted_temperature is None:\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(error)\n return indoor_temperature_prediction_pb2.PredictedTemperatureReply()\n else:\n return predicted_temperature\n\n def GetSecondOrderError(self, request, context):\n \"\"\"A simple RPC.\n\n \"\"\"\n error_reply, error = get_error(request)\n if error_reply is None:\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(error)\n return indoor_temperature_prediction_pb2.PredictedTemperatureReply()\n else:\n return error_reply\n\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n indoor_temperature_prediction_pb2_grpc.add_IndoorTemperaturePredictionServicer_to_server(\n IndoorTemperaturePredictionServicer(), server)\n server.add_insecure_port(HOST_ADDRESS)\n logging.info(\"Serving on {0}\".format(HOST_ADDRESS))\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n\nif __name__ == '__main__':\n logging.info(\"Initializing\")\n initizalize()\n serve()\n #\n # building = 'ciee'\n # zone = \"HVAC_Zone_Northzone\"\n #\n # end = datetime.datetime.now()\n # start = end - datetime.timedelta(hours=2)\n #\n #\n # print(get_zones(building, hod_client))\n #\n # print(training(building, zone, mdal_client, hod_client,\n # int(time.mktime(start.timetuple())*1e9),\n # int(time.mktime(end.timetuple()) * 1e9)))\n"
] | [
[
"pandas.concat",
"numpy.array",
"pandas.Series"
],
[
"numpy.var",
"numpy.mean",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xionghuichen/RLAssistant | [
"efbde6609cfbd60646f935b450dac65bcaa340e6"
] | [
"RLA/easy_log/tester.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\n# Author : Xionghui Chen\n# Created : 2017-11-12\n# Modified : 2017-11-12\n# Version : 1.0\nfrom collections import deque\nimport dill\nimport time\nimport os\n\nimport datetime\nimport os.path as osp\nfrom RLA.easy_log.const import *\nfrom RLA.easy_log.time_step import time_step_holder\nfrom RLA.easy_log import logger\nfrom RLA.easy_log.const import *\nimport yaml\nimport shutil\nimport argparse\n\n\nclass ExperimentLoader(object):\n def __init__(self):\n self.task_name = None\n self.record_date = None\n self.root = None\n self.inherit_hp = None\n pass\n\n def config(self, task_name, record_date, root, inherit_hp):\n self.task_name = task_name\n self.record_date = record_date\n self.root = root\n self.inherit_hp = inherit_hp\n\n @property\n def is_valid_config(self):\n if self.record_date is not None and self.task_name is not None and self.root is not None:\n return True\n else:\n logger.warn(\"meet invalid loader config when use it\")\n logger.warn(\"record_date\", self.record_date)\n logger.warn(\"task_name\", self.task_name)\n logger.warn(\"root\", self.root)\n return False\n\n def import_hyper_parameters(self):\n if self.is_valid_config:\n load_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n args = argparse.Namespace(**load_tester.hyper_param)\n return args\n else:\n return None\n\n def load_from_record_date(self, var_prefix=''):\n if self.is_valid_config:\n loaded_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n # load checkpoint\n loaded_tester.new_saver(var_prefix=var_prefix, max_to_keep=1)\n load_iter, load_res = loaded_tester.load_checkpoint()\n tester.time_step_holder.set_time(load_iter)\n tester.print_log_dir()\n if self.inherit_hp:\n return load_iter, load_res\n else:\n return 0, load_res\n else:\n return 0, {}\n\n def fork_tester_log_files(self):\n \"\"\"\n copy the log files in task_name/record_date to the new experiment.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n if self.is_valid_config:\n global tester\n assert isinstance(tester, Tester)\n loaded_tester = Tester.load_tester(self.record_date, self.task_name, self.root)\n # copy log file\n tester.log_file_copy(loaded_tester)\n # copy attribute\n tester.hyper_param = loaded_tester.hyper_param\n tester.hyper_param_record = loaded_tester.hyper_param_record\n tester.private_config = loaded_tester.private_config\n\nexperimental_loader = ExperimentLoader()\n\ndef import_hyper_parameters(task_name, record_date):\n \"\"\"\n return the hyper parameters of the experiment in task_name/record_date, which is stored in Tester.\n\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n\n args = argparse.Namespace(**load_tester.hyper_param)\n return args\n\n\ndef load_from_record_date(task_name, record_date):\n \"\"\"\n load the checkpoint of the experiment in task_name/record_date.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. 
please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n # load checkpoint\n load_tester.new_saver(var_prefix='', max_to_keep=1)\n load_iter, load_res = load_tester.load_checkpoint()\n tester.time_step_holder.set_time(load_iter)\n tester.print_log_dir()\n return load_iter, load_res\n\n\ndef fork_tester_log_files(task_name, record_date):\n \"\"\"\n copy the log files in task_name/record_date to the new experiment.\n :param task_name:\n :param record_date:\n :return:\n \"\"\"\n logger.warn(\"the function is deprecated. please check the ExperimentLoader as the new implementation\")\n global tester\n assert isinstance(tester, Tester)\n load_tester = tester.load_tester(record_date, task_name, tester.root)\n # copy log file\n tester.log_file_copy(load_tester)\n # copy attribute\n tester.hyper_param = load_tester.hyper_param\n tester.hyper_param_record = load_tester.hyper_param_record\n tester.private_config = load_tester.private_config\n\nclass Tester(object):\n\n def __init__(self):\n self.__custom_recorder = {}\n self.__ipaddr = None\n self.custom_data = {}\n self.time_step_holder = time_step_holder\n self.hyper_param = {}\n self.strftims = None\n self.private_config = None\n self.last_record_fph_time = None\n self.hyper_param_record = []\n self.metadata_list = []\n self.summary_add_dict = {}\n self._rc_start_time = {}\n self.pkl_dir = None\n self.checkpoint_dir = None\n self.pkl_file = None\n self.results_dir = None\n self.log_dir = None\n self.code_dir = None\n self.saver = None\n self.dl_framework = None\n\n def configure(self, task_name, private_config_path, log_root, run_file=None):\n \"\"\"\n\n :param task_name:\n :param private_config_path:\n :return:\n \"\"\"\n fs = open(private_config_path, encoding=\"UTF-8\")\n self.private_config = yaml.load(fs)\n self.run_file = run_file\n self.task_name = task_name\n self.root = log_root\n logger.info(\"private_config: \")\n self.dl_framework = self.private_config[\"DL_FRAMEWORK\"]\n self.project_root = \"/\".join(private_config_path.split(\"/\")[:-1])\n for k, v in self.private_config.items():\n logger.info(\"k: {}, v: {}\".format(k, v))\n\n def set_hyper_param(self, **argkw):\n \"\"\"\n This method is to record all of hyper parameters to test object.\n\n Place pass your parameters as follow format:\n self.set_hyper_param(param_a=a,param_b=b)\n\n Note: It is invalid to pass a local object to this function.\n\n Parameters\n ----------\n argkw : key-value\n for example: self.set_hyper_param(param_a=a,param_b=b)\n\n \"\"\"\n self.hyper_param = argkw\n\n def update_hyper_param(self, k, v):\n self.hyper_param[k] = v\n\n def clear_record_param(self):\n self.hyper_param_record = []\n\n def log_files_gen(self):\n info = None\n self.record_date = datetime.datetime.now()\n logger.info(\"gen log files for record date : {}\".format(self.record_date))\n if info is None:\n info = self.auto_parse_info()\n info = '&' + info\n self.info = info\n code_dir, _ = self.__create_file_directory(osp.join(self.root, CODE, self.task_name), '', is_file=False)\n log_dir, _ = self.__create_file_directory(osp.join(self.root, LOG, self.task_name), '', is_file=False)\n self.pkl_dir, self.pkl_file = self.__create_file_directory(osp.join(self.root, ARCHIVE_TESTER, self.task_name), '.pkl')\n self.checkpoint_dir, _ = self.__create_file_directory(osp.join(self.root, CHECKPOINT, self.task_name), is_file=False)\n self.results_dir, _ = 
self.__create_file_directory(osp.join(self.root, OTHER_RESULTS, self.task_name), is_file=False)\n self.log_dir = log_dir\n self.code_dir = code_dir\n\n self._init_logger()\n self.serialize_object_and_save()\n self.__copy_source_code(self.run_file, code_dir)\n self._feed_hyper_params_to_tb()\n self.print_log_dir()\n\n def update_log_files_location(self, root):\n self.root = root\n code_dir, _ = self.__create_file_directory(osp.join(self.root, CODE, self.task_name), '', is_file=False)\n log_dir, _ = self.__create_file_directory(osp.join(self.root, LOG, self.task_name), '', is_file=False)\n self.pkl_dir, self.pkl_file = self.__create_file_directory(osp.join(self.root, ARCHIVE_TESTER, self.task_name), '.pkl')\n self.checkpoint_dir, _ = self.__create_file_directory(osp.join(self.root, CHECKPOINT, self.task_name), is_file=False)\n self.results_dir, _ = self.__create_file_directory(osp.join(self.root, OTHER_RESULTS, self.task_name), is_file=False)\n self.log_dir = log_dir\n self.code_dir = code_dir\n self.print_log_dir()\n\n def _init_logger(self):\n self.writer = None\n # logger configure\n logger.info(\"store file %s\" % self.pkl_file)\n logger.configure(self.log_dir, self.private_config[\"LOG_USED\"])\n for fmt in logger.Logger.CURRENT.output_formats:\n if isinstance(fmt, logger.TensorBoardOutputFormat):\n self.writer = fmt.writer\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n time_step_holder.config(0, 0, tf_log=False)\n\n def log_file_copy(self, source_tester):\n assert isinstance(source_tester, Tester)\n shutil.rmtree(self.checkpoint_dir)\n shutil.copytree(source_tester.checkpoint_dir, self.checkpoint_dir)\n if os.path.exists(source_tester.results_dir):\n shutil.rmtree(self.results_dir)\n shutil.copytree(source_tester.results_dir, self.results_dir)\n else:\n logger.warn(\"[load warning]: can not find results dir\")\n if os.path.exists(source_tester.log_dir):\n shutil.rmtree(self.log_dir)\n shutil.copytree(source_tester.log_dir, self.log_dir)\n else:\n logger.warn(\"[load warning]: can not find log dir\")\n self._init_logger()\n\n def task_gen(self, task_pattern_list):\n return '-'.join(task_pattern_list)\n\n def print_log_dir(self):\n logger.info(\"log dir: {}\".format(self.log_dir))\n logger.info(\"pkl_file: {}\".format(self.pkl_file))\n logger.info(\"checkpoint_dir: {}\".format(self.checkpoint_dir))\n logger.info(\"results_dir: {}\".format(self.results_dir))\n\n @classmethod\n def load_tester(cls, record_date, task_name, log_root):\n logger.info(\"load tester\")\n res_dir, res_file = cls.log_file_finder(record_date, task_name=task_name,\n file_root=osp.join(log_root, ARCHIVE_TESTER),\n log_type='files')\n import dill\n load_tester = dill.load(open(osp.join(res_dir, res_file), 'rb'))\n assert isinstance(load_tester, Tester)\n logger.info(\"update log files' root\")\n load_tester.update_log_files_location(root=log_root)\n return load_tester\n\n\n def add_record_param(self, keys):\n for k in keys:\n if '.' 
in k:\n try:\n sub_k_list = k.split('.')\n v = self.hyper_param[sub_k_list[0]]\n for sub_k in sub_k_list[1:]:\n v = v[sub_k]\n self.hyper_param_record.append(str(k) + '=' + str(v).replace('[', '{').replace(']', '}').replace('/', '_'))\n except KeyError as e:\n print(\"do not include dot ('.') in your hyperparemeter name\")\n else:\n self.hyper_param_record.append(str(k) + '=' + str(self.hyper_param[k]).replace('[', '{').replace(']', '}').replace('/', '_'))\n\n def add_summary_to_logger(self, summary, name='', simple_val=False, freq=20):\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n logger.info(\"skip adding summary to tb\")\n return\n if name not in self.summary_add_dict:\n self.summary_add_dict[name] = []\n if freq > 0:\n summary_ts = int(self.time_step_holder.get_time() / freq)\n else:\n summary_ts = 0\n if freq <= 0 or summary_ts not in self.summary_add_dict[name]:\n from tensorflow.core.framework import summary_pb2\n summ = summary_pb2.Summary()\n summ.ParseFromString(summary)\n if simple_val:\n list_field = summ.ListFields()\n\n def recursion_util(inp_field):\n if hasattr(inp_field, \"__getitem__\"):\n for inp in inp_field:\n recursion_util(inp)\n elif hasattr(inp_field, 'simple_value'):\n logger.record_tabular(name + '/' + inp_field.tag, inp_field.simple_value)\n else:\n pass\n recursion_util(list_field)\n logger.dump_tabular()\n else:\n self.writer.add_summary(summary, self.time_step_holder.get_time())\n self.writer.flush()\n self.summary_add_dict[name].append(summary_ts)\n\n def _feed_hyper_params_to_tb(self):\n if \"tensorboard\" not in self.private_config[\"LOG_USED\"]:\n logger.info(\"skip feeding hyper-param to tb\")\n return\n\n import tensorflow as tf\n with tf.Session(graph=tf.Graph()) as sess:\n hyperparameters = [tf.convert_to_tensor([k, str(v)]) for k, v in self.hyper_param.items()]\n summary = sess.run(tf.summary.text('hyperparameters', tf.stack(hyperparameters)))\n self.add_summary_to_logger(summary, 'hyperparameters', freq=1)\n\n def sync_log_file(self):\n \"\"\"\n syn_log_file is an automatic synchronization function.\n It will send all log files (e.g., code/**, checkpoint/**, log/**, etc.) to your target server via the FTP protocol.\n To run this function, you should add some configuration on SRG.private_config.py\n\n SEND_LOG_FILE: boolean. 
denotes synchronization or not.\n ftp_server: target server ip address\n username: username of target server\n password: password of target server\n remote_porject_dir: log root of target server, e.g., \"/Project/SRG/SRG/var_gan_imitation/\"\n\n :return:\n \"\"\"\n\n logger.warn(\"sync: start\")\n # ignore_files = self.private_config[\"IGNORE_RULE\"]\n if self.private_config[\"SEND_LOG_FILE\"]:\n from RLA.auto_ftp import FTPHandler\n try:\n ftp = FTPHandler(ftp_server=self.private_config[\"REMOTE_SETTING\"][\"ftp_server\"],\n username=self.private_config[\"REMOTE_SETTING\"][\"username\"],\n password=self.private_config[\"REMOTE_SETTING\"][\"password\"])\n for root, dirs, files in os.walk(self.log_dir):\n suffix = root.split(\"/{}/\".format(LOG))\n assert len(suffix) == 2, \"root should have only one pattern \\\"/log/\\\"\"\n remote_root = osp.join(self.private_config[\"REMOTE_SETTING\"][\"remote_log_root\"], LOG, suffix[1])\n local_root = root\n logger.warn(\"sync {} <- {}\".format(remote_root, local_root))\n for file in files:\n ftp.upload_file(remote_root, local_root, file)\n # for root, dirs, files in os.walk(self.code_dir):\n # remote_root = osp.join(self.private_config.remote_porject_dir, root[3:])\n # local_root = root\n # logger.warn(\"sync {} <- {}\".format(remote_root, local_root))\n # for file in files:\n # ftp.upload_file(remote_root, local_root, file)\n # for root, dirs, files in os.walk(self.checkpoint_dir):\n # for file in files:\n # ftp.upload_file(remote_porject_dir + root[2:], root + '/', file)\n\n logger.warn(\"sync: send success!\")\n except Exception as e:\n logger.warn(\"sending log file failed. {}\".format(e))\n import traceback\n logger.warn(traceback.format_exc())\n\n @classmethod\n def log_file_finder(cls, record_date, task_name='train', file_root='../checkpoint/', log_type='dir'):\n record_date = datetime.datetime.strptime(record_date, '%Y/%m/%d/%H-%M-%S-%f')\n prefix = osp.join(file_root, task_name)\n directory = str(record_date.strftime(\"%Y/%m/%d\"))\n directory = osp.join(prefix, directory)\n file_found = ''\n for root, dirs, files in os.walk(directory):\n if log_type == 'dir':\n search_list = dirs\n elif log_type =='files':\n search_list =files\n else:\n raise NotImplementedError\n for search_item in search_list:\n if search_item.startswith(str(record_date.strftime(\"%H-%M-%S-%f\"))):\n split_dir = search_item.split(' ')\n # self.__ipaddr = split_dir[1]\n info = \" \".join(split_dir[2:])\n logger.info(\"load data: \\n ts {}, \\n ip {}, \\n info {}\".format(split_dir[0], split_dir[1], info))\n file_found = search_item\n break\n return directory, file_found\n\n @property\n def ipaddr(self):\n if self.__ipaddr is None:\n self.__ipaddr = self.__gen_ip()\n return self.__ipaddr\n\n def __gen_ip(self):\n try:\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"1.1.1.1\", 80))\n ip = s.getsockname()[0]\n s.close()\n except Exception as e:\n ip = 'noip'\n return ip\n\n def __copy_source_code(self, run_file, code_dir):\n import shutil\n if self.private_config[\"PROJECT_TYPE\"][\"backup_code_by\"] == 'lib':\n assert os.listdir(code_dir) == []\n os.removedirs(code_dir)\n shutil.copytree(osp.join(self.project_root, self.private_config[\"BACKUP_CONFIG\"][\"lib_dir\"]), code_dir)\n assert run_file is not None, \"you should define the run_file in lib backup mode.\"\n shutil.copy(run_file, code_dir)\n elif self.private_config[\"PROJECT_TYPE\"][\"backup_code_by\"] == 'source':\n for dir_name in 
self.private_config[\"BACKUP_CONFIG\"][\"backup_code_dir\"]:\n shutil.copytree(osp.join(self.project_root, dir_name), osp.join(code_dir, dir_name))\n else:\n raise NotImplementedError\n\n def record_date_to_str(self, record_date):\n return str(record_date.strftime(\"%H-%M-%S-%f\"))\n\n def __create_file_directory(self, prefix, ext='', is_file=True, record_date=None):\n if record_date is None:\n record_date = self.record_date\n directory = str(record_date.strftime(\"%Y/%m/%d\"))\n directory = osp.join(prefix, directory)\n if is_file:\n os.makedirs(directory, exist_ok=True)\n file_name = '{dir}/{timestep} {ip} {info}{ext}'.format(dir=directory,\n timestep=self.record_date_to_str(record_date),\n ip=str(self.ipaddr),\n info=self.info,\n ext=ext)\n else:\n directory = '{dir}/{timestep} {ip} {info}{ext}/'.format(dir=directory,\n timestep=self.record_date_to_str(record_date),\n ip=str(self.ipaddr),\n info=self.info,\n ext=ext)\n os.makedirs(directory, exist_ok=True)\n file_name = ''\n return directory, file_name\n\n def update_fph(self, cum_epochs):\n if self.last_record_fph_time is None:\n self.last_record_fph_time = time.time()\n else:\n cur_time = time.time()\n duration = (cur_time - self.last_record_fph_time) / 60 / 60\n fph = cum_epochs / duration\n logger.record_tabular('fph', fph)\n # self.last_record_fph_time = cur_time\n logger.dump_tabular()\n\n def time_record(self, name):\n assert name not in self._rc_start_time\n self._rc_start_time[name] = time.time()\n\n def time_record_end(self, name):\n end_time = time.time()\n start_time = self._rc_start_time[name]\n logger.record_tabular(\"time_used/{}\".format(name), end_time - start_time)\n logger.info(\"[test] func {0} time used {1:.2f}\".format(name, end_time - start_time))\n del self._rc_start_time[name]\n\n # Saver manger.\n def new_saver(self, max_to_keep, var_prefix=None):\n \"\"\"\n initialize new tf.Saver\n :param var_prefix: we use var_prefix to filter the variables for saving.\n :param max_to_keep:\n :return:\n \"\"\"\n if self.dl_framework == 'tensorflow':\n import tensorflow as tf\n if var_prefix is None:\n var_prefix = ''\n var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, var_prefix)\n logger.info(\"save variable :\")\n for v in var_list:\n logger.info(v)\n self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep, filename=self.checkpoint_dir, save_relative_paths=True)\n elif self.dl_framework == 'pytorch':\n self.max_to_keep = max_to_keep\n self.checkpoint_keep_list = []\n else:\n raise NotImplementedError\n\n def save_checkpoint(self, model_dict=None):\n if self.dl_framework == 'tensorflow':\n import tensorflow as tf\n iter = self.time_step_holder.get_time()\n cpt_name = osp.join(self.checkpoint_dir, 'checkpoint')\n logger.info(\"save checkpoint to \", cpt_name, iter)\n self.saver.save(tf.get_default_session(), cpt_name, global_step=iter)\n elif self.dl_framework == 'pytorch':\n import torch\n iter = self.time_step_holder.get_time()\n torch.save(model_dict, f=tester.checkpoint_dir + \"checkpoint-{}.pt\".format(iter))\n self.checkpoint_keep_list.append(iter)\n if len(self.checkpoint_keep_list) > self.max_to_keep:\n for i in range(len(self.checkpoint_keep_list) - self.max_to_keep):\n rm_ckp_name = tester.checkpoint_dir + \"checkpoint-{}.pt\".format(self.checkpoint_keep_list[i])\n logger.info(\"rm the older checkpoint\", rm_ckp_name)\n os.remove(rm_ckp_name)\n self.checkpoint_keep_list = self.checkpoint_keep_list[-1 * self.max_to_keep:]\n else:\n raise NotImplementedError\n\n def load_checkpoint(self):\n 
if self.dl_framework == 'tensorflow':\n # TODO: load with variable scope.\n import tensorflow as tf\n cpt_name = osp.join(self.checkpoint_dir)\n logger.info(\"load checkpoint {}\".format(cpt_name))\n ckpt_path = tf.train.latest_checkpoint(cpt_name)\n self.saver.restore(tf.get_default_session(), ckpt_path)\n max_iter = ckpt_path.split('-')[-1]\n self.time_step_holder.set_time(max_iter)\n return int(max_iter), None\n elif self.dl_framework == 'pytorch':\n import torch\n return self.checkpoint_keep_list[-1], torch.load(tester.checkpoint_dir + \"checkpoint-{}.pt\".format(self.checkpoint_keep_list[-1]))\n\n def auto_parse_info(self):\n return '&'.join(self.hyper_param_record)\n\n\n def add_graph(self, sess):\n assert self.writer is not None\n self.writer.add_graph(sess.graph)\n\n # --- custom data manager --\n def add_custom_data(self, key, data, dtype=list, max_len=-1):\n if key not in self.custom_data:\n if issubclass(dtype, deque):\n assert max_len > 0\n\n self.custom_data[key] = deque(maxlen=max_len)\n self.custom_data[key].append(data)\n elif issubclass(dtype, list):\n self.custom_data[key] = [data]\n else:\n self.custom_data[key] = data\n else:\n if issubclass(dtype, list) or issubclass(dtype, deque):\n self.custom_data[key].append(data)\n else:\n self.custom_data[key] = data\n\n def print_custom_data(self, key, prefix=''):\n assert key in self.custom_data\n import numpy as np\n mean_val = np.mean(self.custom_data[key])\n logger.record_tabular(prefix + key, mean_val)\n\n def clear_custom_data(self, key):\n if key in self.custom_data:\n del self.custom_data[key]\n else:\n logger.warn(\"[WARN] key [{}], not in custom_data\".format(key))\n\n def get_custom_data(self, key):\n if key not in self.custom_data:\n return None\n else:\n return self.custom_data[key]\n\n def serialize_object_and_save(self):\n \"\"\"\n This method is to save test object to a dill.\n This method will be call every time you call add_custom_record or other record function like self.check_and_test\n \"\"\"\n # remove object which can is not serializable\n writer = self.writer\n self.writer = None\n saver = self.saver\n self.saver = None\n with open(self.pkl_file, 'wb') as f:\n dill.dump(self, f)\n self.writer = writer\n self.saver = saver\n\n def print_args(self):\n sort_list = sorted(self.hyper_param.items(), key=lambda i: i[0])\n for key, value in sort_list:\n logger.info(\"key: %s, value: %s\" % (key, value))\n\n def print_large_memory_variable(self):\n import sys\n large_mermory_dict = {}\n\n def sizeof_fmt(num, suffix='B'):\n for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f %s%s\" % (num, unit, suffix), unit\n num /= 1024.0\n return \"%.1f %s%s\" % (num, 'Yi', suffix), 'Yi'\n\n for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),\n key=lambda x: -x[1])[:10]:\n size_str, fmt_type = sizeof_fmt(size)\n if fmt_type in ['', 'Ki', 'Mi']:\n continue\n logger.info(\"{:>30}: {:>8}\".format(name, size_str))\n large_mermory_dict[str(name)] = size_str\n if large_mermory_dict != {}:\n summary = self.dict_to_table_text_summary(large_mermory_dict, 'large_memory')\n self.add_summary_to_logger(summary, 'large_memory')\n\n def dict_to_table_text_summary(self, input_dict, name):\n import tensorflow as tf\n with tf.Session(graph=tf.Graph()) as sess:\n to_tensor = [tf.convert_to_tensor([k, str(v)]) for k, v in input_dict.items()]\n return sess.run(tf.summary.text(name, tf.stack(to_tensor)))\n\n\ntester = Tester()\n"
] | [
[
"tensorflow.get_default_session",
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_collection",
"tensorflow.stack",
"tensorflow.core.framework.summary_pb2.Summary",
"numpy.mean",
"tensorflow.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
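The `Tester` class above wraps, in its TensorFlow branch, the standard TF1 save/restore cycle: build a `tf.train.Saver` over a prefix-filtered variable list, save with a `global_step` suffix, and on load recover the iteration from the latest checkpoint path. A minimal sketch of that pattern, assuming TensorFlow 1.x (matching the `"1.10"` pin recorded for this repo) and a hypothetical `/tmp/demo_ckpt` directory:

```python
# Minimal sketch of the cycle behind Tester.new_saver / save_checkpoint /
# load_checkpoint. Assumes TensorFlow 1.x; ckpt_dir is a stand-in path.
import os
import tensorflow as tf

ckpt_dir = "/tmp/demo_ckpt"
os.makedirs(ckpt_dir, exist_ok=True)

x = tf.get_variable("x", shape=(), initializer=tf.zeros_initializer())
inc = tf.assign_add(x, 1.0)

# Prefix-filtered variable list, as in new_saver(var_prefix='').
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "")
saver = tf.train.Saver(var_list=var_list, max_to_keep=1,
                       save_relative_paths=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(3):
        sess.run(inc)
        saver.save(sess, os.path.join(ckpt_dir, "checkpoint"),
                   global_step=step)

    # load_checkpoint recovers the iteration from the filename suffix.
    ckpt_path = tf.train.latest_checkpoint(ckpt_dir)
    saver.restore(sess, ckpt_path)
    print("restored iter:", int(ckpt_path.split("-")[-1]), "x =", sess.run(x))
```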
YukiHata-ITS/uda_nd013-c1-vision-starter | [
"4785970ae56a21905d63ae429b3a6ee717804668"
] | [
"workspace/inference_video.py"
] | [
"import argparse\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import animation\n\nfrom object_detection.builders.dataset_builder import build as build_dataset\nfrom object_detection.utils.config_util import get_configs_from_pipeline_file\nfrom object_detection.utils.label_map_util import create_category_index_from_labelmap\nfrom object_detection.utils import visualization_utils as viz_utils\n\nfrom utils import get_module_logger\n\n\ndef main(labelmap_path, model_path, tf_record_path, config_path, output_path):\n \"\"\"\n Use a model and a tf record file and create a mp4 video\n args:\n - labelmap_path [str]: path to labelmap file\n - model_path [str]: path to exported model \n - tf_record_path [str]: path to tf record file to visualize\n - config_path [str]: path to config file\n - output_path [str]: path to mp4 file\n\n Save the results as mp4 file\n \"\"\"\n # load label map\n category_index = create_category_index_from_labelmap(labelmap_path,\n use_display_name=True)\n\n # Load saved model and build the detection function\n logger.info(f'Loading model from {model_path}')\n detect_fn = tf.saved_model.load(model_path)\n\n # open config file\n logger.info(f'Loading config from {config_path}')\n configs = get_configs_from_pipeline_file(config_path)\n eval_config = configs['eval_config']\n eval_input_config = configs['eval_input_config']\n model_config = configs['model']\n\n # update the eval config file\n eval_input_config.tf_record_input_reader.input_path[:] = [tf_record_path]\n dataset = build_dataset(eval_input_config)\n\n # build dataset\n dataset = build_dataset(eval_input_config)\n\n # here we infer on the entire dataset\n images = []\n logger.info(f'Inference on {tf_record_path}')\n for idx, batch in enumerate(dataset):\n if idx % 50:\n logger.info(f'Step: {idx}')\n # add new axis and feed into model \n input_tensor = batch['image']\n image_np = input_tensor.numpy().astype(np.uint8)\n input_tensor = input_tensor[tf.newaxis, ...]\n\n detections = detect_fn(input_tensor)\n \n # tensor -> numpy arr, remove one dimensions\n num_detections = int(detections.pop('num_detections'))\n detections = {key: value[0, ...].numpy()\n for key, value in detections.items()}\n detections['num_detections'] = num_detections\n\n # detection_classes should be ints.\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\n image_np_with_detections = image_np.copy()\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'],\n detections['detection_classes'],\n detections['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=200,\n min_score_thresh=.30,\n agnostic_mode=False)\n images.append(image_np_with_detections)\n \n # now we can create the animation\n f = plt.figure()\n f.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\n ax = plt.subplot(111)\n ax.axis('off')\n im_obj = ax.imshow(images[0])\n\n def animate(idx):\n image = images[idx]\n im_obj.set_data(image)\n \n anim = animation.FuncAnimation(f, animate, frames=198)\n anim.save(output_path, fps=5, dpi=300)\n\n\nif __name__ == \"__main__\": \n logger = get_module_logger(__name__)\n\n parser = argparse.ArgumentParser(description='Create video')\n parser.add_argument('--labelmap_path', required=True, type=str,\n help='path to the label map')\n parser.add_argument('--model_path', required=True, type=str,\n help='path to the saved model 
folder')\n parser.add_argument('--tf_record_path', required=True, type=str,\n help='path to the tf record file')\n parser.add_argument('--config_path', required=False, type=str,\n default='pipeline.config', \n help='path to the config file')\n parser.add_argument('--output_path', required=False, type=str, \n default='animation.mp4', \n help='path of the saved file')\n args = parser.parse_args()\n main(args.labelmap_path, \n args.model_path, \n args.tf_record_path, \n args.config_path, \n args.output_path)\n"
] | [
[
"matplotlib.pyplot.subplot",
"matplotlib.animation.FuncAnimation",
"tensorflow.saved_model.load",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
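The heart of `inference_video.py` above is the `FuncAnimation` idiom: draw the first frame once with `imshow`, then mutate that same image artist on each callback and let the writer encode the sequence. A self-contained sketch with random stand-in frames (it assumes `ffmpeg` is available to matplotlib's mp4 writer):

```python
# Minimal sketch of the frame-update animation used above; random frames
# stand in for the detection overlays. Assumes ffmpeg is on the PATH.
import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend, chosen before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib import animation

images = [np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
          for _ in range(10)]

fig = plt.figure()
ax = plt.subplot(111)
ax.axis("off")
im_obj = ax.imshow(images[0])

def animate(idx):
    # FuncAnimation redraws by mutating the existing image artist.
    im_obj.set_data(images[idx])

anim = animation.FuncAnimation(fig, animate, frames=len(images))
anim.save("animation.mp4", fps=5)  # the script's default --output_path
```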
periakiva/finding_berries | [
"1dfc7cf00c384321e39872921051dc9535355e53",
"1dfc7cf00c384321e39872921051dc9535355e53",
"1dfc7cf00c384321e39872921051dc9535355e53"
] | [
"models/fpn/fpn_decoder.py",
"training/cranberry_segmentation/sandbox.py",
"evaluation/segmentation/list_evaluator.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Conv3x3GNReLU(nn.Module):\n def __init__(self, in_channels, out_channels, upsample=False):\n super().__init__()\n self.upsample = upsample\n self.block = nn.Sequential(\n nn.Conv2d(\n in_channels, out_channels, (3, 3), stride=1, padding=1, bias=False\n ),\n nn.GroupNorm(32, out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n x = self.block(x)\n if self.upsample:\n x = F.interpolate(x, scale_factor=2, mode=\"bilinear\", align_corners=True)\n return x\n\n\nclass FPNBlock(nn.Module):\n def __init__(self, pyramid_channels, skip_channels):\n super().__init__()\n self.skip_conv = nn.Conv2d(skip_channels, pyramid_channels, kernel_size=1)\n\n def forward(self, x, skip=None):\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n skip = self.skip_conv(skip)\n x = x + skip\n return x\n\n\nclass SegmentationBlock(nn.Module):\n def __init__(self, in_channels, out_channels, n_upsamples=0):\n super().__init__()\n\n blocks = [Conv3x3GNReLU(in_channels, out_channels, upsample=bool(n_upsamples))]\n\n if n_upsamples > 1:\n for _ in range(1, n_upsamples):\n blocks.append(Conv3x3GNReLU(out_channels, out_channels, upsample=True))\n\n self.block = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.block(x)\n\n\nclass MergeBlock(nn.Module):\n def __init__(self, policy):\n super().__init__()\n if policy not in [\"add\", \"cat\"]:\n raise ValueError(\n \"`merge_policy` must be one of: ['add', 'cat'], got {}\".format(\n policy\n )\n )\n self.policy = policy\n\n def forward(self, x):\n if self.policy == 'add':\n return sum(x)\n elif self.policy == 'cat':\n return torch.cat(x, dim=1)\n else:\n raise ValueError(\n \"`merge_policy` must be one of: ['add', 'cat'], got {}\".format(self.policy)\n )\n\n\nclass FPNDecoder(nn.Module):\n def __init__(\n self,\n encoder_channels,\n encoder_depth=5,\n pyramid_channels=256,\n segmentation_channels=128,\n dropout=0.2,\n merge_policy=\"add\",\n ):\n super().__init__()\n\n self.out_channels = segmentation_channels if merge_policy == \"add\" else segmentation_channels * 4\n if encoder_depth < 3:\n raise ValueError(\"Encoder depth for FPN decoder cannot be less than 3, got {}.\".format(encoder_depth))\n\n encoder_channels = encoder_channels[::-1]\n encoder_channels = encoder_channels[:encoder_depth + 1]\n\n self.p5 = nn.Conv2d(encoder_channels[0], pyramid_channels, kernel_size=1)\n self.p4 = FPNBlock(pyramid_channels, encoder_channels[1])\n self.p3 = FPNBlock(pyramid_channels, encoder_channels[2])\n self.p2 = FPNBlock(pyramid_channels, encoder_channels[3])\n \n self.seg_blocks = nn.ModuleList([\n SegmentationBlock(pyramid_channels, segmentation_channels, n_upsamples=n_upsamples)\n for n_upsamples in [3, 2, 1, 0]\n ])\n\n self.merge = MergeBlock(merge_policy)\n self.dropout = nn.Dropout2d(p=dropout, inplace=True)\n\n def forward(self, *features):\n c2, c3, c4, c5 = features[-4:]\n\n p5 = self.p5(c5)\n p4 = self.p4(p5, c4)\n p3 = self.p3(p4, c3)\n p2 = self.p2(p3, c2)\n\n feature_pyramid = [seg_block(p) for seg_block, p in zip(self.seg_blocks, [p5, p4, p3, p2])]\n x = self.merge(feature_pyramid)\n x = self.dropout(x)\n\n return x",
"import numpy as np\nfrom collections import namedtuple\nfrom scipy import ndimage\nPoint = namedtuple('Point', 'x y')\n\narray = np.array([[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0]])\n# print(array)\n# index,uniques = np.unique(array,return_index=True)\n# print(index)\n# print(uniques)\n\nblobs = array==1\nprint(blobs)\nlabels, nlabels = ndimage.label(blobs)\n\nprint(labels)\nprint(nlabels)",
"import sys\nimport os\ncurrent_path = os.getcwd().split(\"/\")\nif 'projects' in current_path:\n sys.path.append(\"/home/native/projects/cranberry_counting/\")\nelse:\n sys.path.append(\"/app/cranberry_counting/\")\n\nimport gc\nimport comet_ml\nimport utils.utils as utils\nimport torch\nfrom scipy import ndimage\nimport torch.optim as optim\nfrom torch import nn\nimport torchvision as tv\nfrom torchvision.models import inception_v3\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nfrom models import unet, loss, unet_refined\nfrom models.unet_regression import unet_regres\nfrom peterpy import peter\nfrom datasets.cranberries import cranberry_dataset\nimport numpy as np\nfrom tqdm import tqdm\nimport utils.eval_utils as eval_utils\nimport datetime\nfrom skimage.morphology import watershed\nfrom skimage.segmentation import find_boundaries\nfrom skimage import morphology\nimport warnings\nimport yaml\nimport losses\nwarnings.filterwarnings('ignore')\n\nclass Evaluator(object):\n # def __init__(self,model,test_loader,criterion,has_mask=False):\n def __init__(self,model,test_loader,criterion,\n test_with_full_supervision = 0,has_mask=True):\n\n self.model = model\n self.criterion = criterion\n self.save_figures = False\n self.visualizer_indicator = True\n if test_loader is not None:\n self.test_loader = test_loader\n self.test_with_full_supervision = test_with_full_supervision\n\n def visualizer(self,pred,imgs,masks,estimated_count,detection_count,gt_count):\n\n if pred.shape[0]>1 and len(pred.shape)==3:\n print(f\"pred mask: {pred.shape}\")\n pred = pred[0,:,:]\n imgs = imgs[0,:,:,].unsqueeze_(0)\n masks = masks[0,:,:,].unsqueeze_(0)\n imgs = imgs.cpu().detach().numpy().squeeze()\n masks = masks.cpu().detach().numpy()\n blobs = pred==1\n # labels, nlabels = ndimage.label(blobs)\n labels, nlabels = morphology.label(blobs,return_num=True)\n\n # count_by_detection = 0\n # for label in range(1,nlabels):\n # inds = np.argwhere(labels==label)\n # area = inds.shape[0]\n # x = inds[:,0]\n # y = inds[:,1]\n # if area < 20:\n # labels[x,y] = 0\n # if area > 20:\n # count_by_detection = count_by_detection + 1\n\n cmap = plt.cm.get_cmap('tab10')\n labels_imshow = np.ma.masked_where(labels==0,labels)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(3,2,1)\n ax1.title.set_text(\"Semantic Prediction\")\n ax1.imshow(pred)\n\n ax2 = fig.add_subplot(3,2,2)\n ax2.title.set_text(\"GT\")\n ax2.imshow(np.transpose(masks,(1,2,0)).squeeze())\n\n ax3 = fig.add_subplot(3,2,3)\n ax3.title.set_text(\"Image\")\n ax3.imshow(np.transpose(imgs,(1,2,0)))\n\n ax4 = fig.add_subplot(3,2,4)\n ax4.title.set_text(\"Instance Overlay\")\n ax4.imshow(np.transpose(imgs,(1,2,0)))\n ax4.imshow(labels_imshow,interpolation='none',cmap=cmap,alpha=0.9,vmin=0)\n\n ax5 = fig.add_subplot(3,2,5)\n ax5.imshow(labels,cmap=cmap)\n ax5.title.set_text(\"Instance Prediction\")\n \n fig.suptitle(f\"gt count: {gt_count}, regress count: {round(estimated_count)} count_detection: {round(detection_count)}\",\n y=0.98)\n \n return fig\n\n\n def evaluate(self,cometml_experiment):\n self.model.eval()\n print(\"testing\")\n total_loss = 0\n preds,targets,counts,estimated_counts,detection_counts = [],[],[],[], []\n # loss_type = \"_\".join(self.losses_to_use)\n # loss_weights_str = \"_\".join([str(x)+str(y) for x,y in self.loss_weights.items()])\n if self.test_with_full_supervision == 1:\n loader = self.test_loader\n with torch.no_grad():\n for batch_index,batch in enumerate(loader):\n imgs,masks,count 
= batch\n\n imgs = imgs.to(device)\n masks = masks.to(device).squeeze(1)\n count = count.to(device)\n output, count_estimation = self.model.forward(imgs)\n \n loss = self.criterion(output,masks)\n pred = output.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()\n \n blobs = pred==1\n labels, nlabels = ndimage.label(blobs)\n count_by_detection = 0\n for label in range(1,nlabels):\n inds = np.argwhere(labels==label)\n area = inds.shape[0]\n x = inds[:,0]\n y = inds[:,1]\n if area < 25:\n labels[x,y] = 0\n if area > 25:\n count_by_detection = count_by_detection + 1\n\n\n\n if self.visualizer_indicator:\n figure = self.visualizer(pred,imgs,masks,count_estimation.item(),count_by_detection,count.item())\n cometml_experiment.log_figure(figure_name=f\"test, current loss: {loss}\",figure=figure)\n # model.predict(batch)\n if self.save_figures:\n # print(\"saving image\")\n figure_save_path = f\"/home/native/projects/cranberry_counting/visualization_outputs/points/{loss_type}_{loss_weights_str}/\"\n utils.create_dir_if_doesnt_exist(figure_save_path)\n figure.savefig(f\"{figure_save_path}/testing_loss_{loss}_detectcount_{count_by_detection}_estimatedcount_{count_estimation.item()}_gt_count_{count.item()}.png\",dpi=300)\n figure.clear()\n plt.cla()\n plt.clf()\n plt.close('all')\n plt.close(figure)\n gc.collect()\n masks = masks.squeeze_(0).cpu().numpy()\n preds.append(pred)\n targets.append(masks)\n counts.append(count.item())\n estimated_counts.append(count_estimation.item())\n detection_counts.append(count_by_detection)\n total_loss+=loss.item()\n # val_mae_lcfcn = eval_utils.val_mae(estimated_counts,counts)\n count_mae = eval_utils.mae(estimated_counts,counts)\n count_rmse = eval_utils.rmse(estimated_counts,counts)\n count_mape = eval_utils.mape(estimated_counts,counts)\n\n detection_count_mae = eval_utils.mae(detection_counts,counts)\n detection_count_rmse = eval_utils.rmse(detection_counts,counts)\n detection_count_mape = eval_utils.mape(detection_counts,counts)\n count_metrics = {\"regression mae\":count_mae,\"regression rmse\":count_rmse,\"regression mape\":\n count_mape,\"detection mae\":detection_count_mae,\"detection rmse\":detection_count_rmse,\n \"detection mape\":detection_count_mape}\n # print(type(count_metrics[0]))\n _,_,mean_iou,_ = eval_utils.calc_mAP(preds,targets)\n print(\"Validation mIoU value: {0:1.5f}\".format(mean_iou))\n print(f\"Validation Count Regression Mean Average Error: {count_mae}\\nRegression Root Mean Squared Error: {count_rmse}\\nRegression Mean Absolute Percent Error: {count_mape}\\nDetection MAE: {detection_count_mae}\\nDetection RMSE: {detection_count_rmse}\\n Detection MAPE: {detection_count_mape}\")\n print(\"Validation average loss: {1:1.2f}\".format(total_loss/self.val_loader.__len__()))\n\n\n return total_loss/self.val_loader.__len__(), mean_iou,count_metrics\n\n \n def forward(self,cometml_experiment):\n with cometml_experiment.validate():\n self.evaluate(cometml_experiment)\n return\n\nif __name__ == \"__main__\":\n\n project_name = f\"{current_path[-3]}_{current_path[-1]}\"#_{datetime.datetime.today().strftime('%Y-%m-%d-%H:%M')}\"\n experiment = comet_ml.Experiment(api_key=\"9GTK1r9PK4NzMAoLsnC6XxI7p\",project_name=project_name,workspace=\"periakiva\")\n \n config_path = utils.dictionary_contents(os.getcwd()+\"/\",types=[\"*.yaml\"])[0]\n config = utils.config_parser(config_path,experiment_type=\"training\")\n\n torch.set_default_dtype(torch.float32)\n device_cpu = torch.device('cpu')\n device = torch.device('cuda:0') if config['use_cuda'] else 
device_cpu\n\n # data_dictionary,batch_size,num_workers,instance_seg = False):\n test_loader = cranberry_dataset.build_single_loader(data_dictionary = config['data']['eval_dir'],\n batch_size=config['testing']['batch_size'],\n num_workers=config['testing']['num_workers'],\n instance_seg=config['data']['instance_seg'],\n test=config['testing']['img_only'], has_mask = config['data']['has_mask']\n )\n\n\n with peter('Building Network'):\n model = unet_refined.UNetRefined(n_channels=3,n_classes=2)\n # model = unet_regres.Unet(in_channels=3,classes=2,decoder_channels= (512,256,128),encoder_depth=3)\n num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"model has {} trainable parameters\".format(num_params))\n # model = nn.DataParallel(model)\n model.to(device)\n model.cuda()\n \n\n class_weights = torch.Tensor((1,1)).float()\n class_weights = class_weights.to(device)\n loss_segmentation = nn.CrossEntropyLoss(class_weights)\n # loss_convexity = loss.ConvexShapeLoss(height=456,width=608,device=device)\n optimizer = optim.Adam(model.parameters(),\n lr=config['testing']['learning_rate'],\n amsgrad=True)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,len(test_loader),eta_min = config['testing']['learning_rate'])\n start_epoch = 0\n lowest_mahd = np.infty\n model_paths = utils.dictionary_contents(config['data']['models_lists_dir'],types=[\"*.pth\"])\n for model_path in model_paths:\n #TODO: Add resume option to Trainer using below code\n if config['testing']['resume'] != False:\n with peter('Loading checkpoints'):\n if os.path.isfile(config['testing']['resume']):\n # model = torch.load(config['training']['resume'])\n checkpoint = torch.load(config['testing']['resume'])\n # print(checkpoint)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n print(f\"loaded model from {config['testing']['resume']}\")\n # print(\"Loaded checkpoint {}, now at epoch: {}\".format(config['training']['resume'],checkpoint['epoch'])) \n else:\n print(\"no checkpoint found at {}\".format(config['testing']['resume']))\n exit()\n\n\n \n class_weights = torch.Tensor((1,1)).float()\n class_weights = class_weights.to(device)\n loss_segmentation = nn.CrossEntropyLoss(class_weights)\n\n # model = torch.load(config['data']['model_name'])\n # model.load_state_dict(checkpoint['model'])\n\n evalutor = Evaluator(model=model,test_loader = test_loader,criterion=loss_segmentation,has_mask=config['data']['has_mask'])\n evalutor.forward(experiment)"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.functional.interpolate",
"torch.nn.GroupNorm",
"torch.nn.ReLU"
],
[
"scipy.ndimage.label",
"numpy.array"
],
[
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.cm.get_cmap",
"torch.Tensor",
"torch.set_default_dtype",
"torch.load",
"matplotlib.pyplot.cla",
"numpy.argwhere",
"scipy.ndimage.label",
"matplotlib.pyplot.clf",
"torch.no_grad",
"matplotlib.pyplot.close",
"numpy.transpose",
"torch.device",
"numpy.ma.masked_where",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
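The count-by-detection step in `Evaluator.evaluate` above is a connected-component count with an area filter: label blobs in the binary prediction with `scipy.ndimage.label`, erase components below an area threshold, and count the rest. Note that the source iterates `range(1, nlabels)`, which skips the highest-numbered component; the toy sketch below (smaller mask, 5-px threshold instead of 25) uses the inclusive form:

```python
# Minimal sketch of the area-filtered blob count from Evaluator.evaluate.
import numpy as np
from scipy import ndimage

pred = np.zeros((10, 10), dtype=int)
pred[1:4, 1:4] = 1  # 9-px blob: kept
pred[7, 7] = 1      # 1-px blob: erased

labels, nlabels = ndimage.label(pred == 1)

count_by_detection = 0
for label in range(1, nlabels + 1):  # inclusive of the last component
    inds = np.argwhere(labels == label)
    if inds.shape[0] < 5:
        labels[inds[:, 0], inds[:, 1]] = 0  # zero out small blobs
    else:
        count_by_detection += 1

print(count_by_detection)  # -> 1
```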
cbigit/unet | [
"89d5576624620293419f1fa8fc16b47219dcad0e"
] | [
"2D/plot_tf_inference_examples.py"
] | [
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\n\"\"\"\nTakes a trained model and performs inference on a few validation examples.\n\"\"\"\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # Get rid of the AVX, SSE warnings\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nfrom tensorflow import keras as K\nimport settings\nimport argparse\nfrom dataloader import DatasetGenerator\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use(\"Agg\")\n\n\nparser = argparse.ArgumentParser(\n description=\"TensorFlow Inference example for trained 2D U-Net model on BraTS.\",\n add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument(\"--data_path\", default=settings.DATA_PATH,\n help=\"the path to the data\")\nparser.add_argument(\"--output_path\", default=settings.OUT_PATH,\n help=\"the folder to save the model and checkpoints\")\nparser.add_argument(\"--inference_filename\", default=settings.INFERENCE_FILENAME,\n help=\"the TensorFlow inference model filename\")\nparser.add_argument(\"--use_pconv\",help=\"use partial convolution based padding\",\n action=\"store_true\",\n default=settings.USE_PCONV)\nparser.add_argument(\"--output_pngs\", default=\"inference_examples\",\n help=\"the directory for the output prediction pngs\")\n\nparser.add_argument(\"--intraop_threads\", default=settings.NUM_INTRA_THREADS,\n type=int, help=\"Number of intra-op-parallelism threads\")\nparser.add_argument(\"--interop_threads\", default=settings.NUM_INTER_THREADS,\n type=int, help=\"Number of inter-op-parallelism threads\")\nparser.add_argument(\"--crop_dim\", default=settings.CROP_DIM,\n type=int, help=\"Crop dimension for images\")\nparser.add_argument(\"--seed\", default=settings.SEED,\n type=int, help=\"Random seed\")\n\nargs = parser.parse_args()\n\ndef test_intel_tensorflow():\n \"\"\"\n Check if Intel version of TensorFlow is installed\n \"\"\"\n import tensorflow as tf\n \n print(\"We are using Tensorflow version {}\".format(tf.__version__))\n \n major_version = int(tf.__version__.split(\".\")[0])\n if major_version >= 2:\n from tensorflow.python import _pywrap_util_port\n print(\"Intel-optimizations (DNNL) enabled:\", _pywrap_util_port.IsMklEnabled())\n else:\n print(\"Intel-optimizations (DNNL) enabled:\", tf.pywrap_tensorflow.IsMklEnabled()) \n\ntest_intel_tensorflow()\n\n\ndef calc_dice(target, prediction, smooth=0.0001):\n \"\"\"\n Sorenson Dice\n \\frac{ 2 \\times \\left | T \\right | \\cap \\left | P \\right |}{ \\left | T \\right | + \\left | P \\right | }\n where T is ground truth (target) mask and P is the prediction mask\n \"\"\"\n prediction = np.round(prediction)\n\n numerator = 2.0 * np.sum(target * prediction) + smooth\n denominator = np.sum(target) + np.sum(prediction) + smooth\n coef = numerator / denominator\n\n return coef\n\n\ndef calc_soft_dice(target, prediction, smooth=0.0001):\n \"\"\"\n Sorensen (Soft) 
Dice coefficient - Don't round predictions\n \"\"\"\n numerator = 2.0 * np.sum(target * prediction) + smooth\n denominator = np.sum(target) + np.sum(prediction) + smooth\n coef = numerator / denominator\n\n return coef\n\n\ndef plot_results(ds, idx, png_directory):\n \n dt = ds.get_dataset().take(1).as_numpy_iterator() # Get some examples (use different seed for different samples)\n\n plt.figure(figsize=(10,10))\n\n for img, msk in dt:\n\n plt.subplot(1, 3, 1)\n plt.imshow(img[idx, :, :, 0], cmap=\"bone\", origin=\"lower\")\n plt.title(\"MRI {}\".format(idx), fontsize=20)\n\n plt.subplot(1, 3, 2)\n plt.imshow(msk[idx, :, :], cmap=\"bone\", origin=\"lower\")\n plt.title(\"Ground truth\", fontsize=20)\n\n plt.subplot(1, 3, 3)\n\n print(\"Index {}: \".format(idx), end=\"\")\n \n # Predict using the TensorFlow model\n start_time = time.time()\n prediction = model.predict(img[[idx]])\n print(\"Elapsed time = {:.4f} msecs, \".format(1000.0*(time.time()-start_time)), end=\"\")\n \n plt.imshow(prediction[0,:,:,0], cmap=\"bone\", origin=\"lower\")\n dice_coef = calc_dice(msk[idx], prediction)\n print(\"Dice coefficient = {:.4f}, \".format(dice_coef), end=\"\")\n plt.title(\"Prediction\\nDice = {:.4f}\".format(dice_coef), fontsize=20)\n\n save_name = os.path.join(png_directory, \"prediction_tf_{}.png\".format(idx))\n print(\"Saved as: {}\".format(save_name))\n plt.savefig(save_name)\n \nif __name__ == \"__main__\":\n\n model_filename = os.path.join(args.output_path, args.inference_filename)\n\n ds_testing = DatasetGenerator(os.path.join(args.data_path, \"testing/*.npz\"), \n crop_dim=args.crop_dim, \n batch_size=128, \n augment=False, \n seed=args.seed)\n # Load model\n if args.use_pconv:\n from model_pconv import unet\n unet_model = unet(use_pconv=True)\n else:\n from model import unet\n unet_model = unet()\n \n \n model = unet_model.load_model(model_filename)\n\n # Create output directory for images\n png_directory = args.output_pngs\n if not os.path.exists(png_directory):\n os.makedirs(png_directory)\n\n # Plot some results\n # The plots will be saved to the png_directory\n # Just picking some random samples.\n indicies_testing = [11,17,25,56,89,101,119]\n\n for idx in indicies_testing:\n plot_results(ds_testing, idx, png_directory)\n"
] | [
[
"matplotlib.pyplot.imshow",
"tensorflow.__version__.split",
"matplotlib.pyplot.title",
"matplotlib.use",
"tensorflow.python._pywrap_util_port.IsMklEnabled",
"matplotlib.pyplot.savefig",
"numpy.round",
"matplotlib.pyplot.subplot",
"tensorflow.pywrap_tensorflow.IsMklEnabled",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
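`calc_dice` above computes the smoothed Sorensen-Dice coefficient, 2|T ∩ P| / (|T| + |P|), on a prediction rounded to {0, 1}; `calc_soft_dice` is identical minus the rounding. A worked example on a 2x2 toy mask:

```python
# Worked example of the smoothed Sorensen-Dice coefficient from calc_dice.
import numpy as np

def calc_dice(target, prediction, smooth=0.0001):
    prediction = np.round(prediction)  # calc_soft_dice omits this step
    numerator = 2.0 * np.sum(target * prediction) + smooth
    denominator = np.sum(target) + np.sum(prediction) + smooth
    return numerator / denominator

target = np.array([[1, 1], [0, 0]])
prediction = np.array([[0.9, 0.2], [0.1, 0.0]])  # rounds to [[1, 0], [0, 0]]
print(calc_dice(target, prediction))  # 2*1 / (2 + 1) ~= 0.6667
```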
willfrey/ray | [
"288a81b42ef0186ab4db33b30191614a7bdb69f6",
"288a81b42ef0186ab4db33b30191614a7bdb69f6",
"288a81b42ef0186ab4db33b30191614a7bdb69f6"
] | [
"python/ray/workflow/tests/test_checkpoint_2.py",
"python/ray/tests/test_failure_4.py",
"rllib/utils/tf_utils.py"
] | [
"import ray\nimport time\nimport pytest\nfrom ray.tests.conftest import * # noqa\n\nimport numpy as np\nfrom ray import workflow\nfrom ray.workflow.tests import utils\nfrom ray.exceptions import RaySystemError\n\n\nSIZE = 2 ** 15\n\n\[email protected]\ndef checkpoint_dag(checkpoint):\n @ray.remote\n def large_input():\n return np.arange(SIZE)\n\n @ray.remote\n def identity(x):\n if not utils.check_global_mark():\n import os\n\n os.kill(os.getpid(), 9)\n return x\n\n @ray.remote\n def average(x):\n return np.mean(x)\n\n x = large_input.options(**workflow.options(checkpoint=checkpoint)).bind()\n y = identity.options(**workflow.options(checkpoint=checkpoint)).bind(x)\n return workflow.continuation(average.bind(y))\n\n\ndef test_checkpoint_dag_recovery_skip(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(\n checkpoint_dag.options(**workflow.options(checkpoint=False)).bind(False)\n ).run(workflow_id=\"checkpoint_skip_recovery\")\n run_duration_skipped = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_skip_recovery\"))\n recover_duration_skipped = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n\n print(\n f\"[skipped] run_duration = {run_duration_skipped}, \"\n f\"recover_duration = {recover_duration_skipped}\"\n )\n\n\ndef test_checkpoint_dag_recovery_partial(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(checkpoint_dag.bind(False)).run(\n workflow_id=\"checkpoint_partial_recovery\"\n )\n run_duration_partial = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_partial_recovery\"))\n recover_duration_partial = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n print(\n f\"[partial] run_duration = {run_duration_partial}, \"\n f\"recover_duration = {recover_duration_partial}\"\n )\n\n\ndef test_checkpoint_dag_recovery_whole(workflow_start_regular_shared):\n utils.unset_global_mark()\n\n start = time.time()\n with pytest.raises(RaySystemError):\n workflow.create(checkpoint_dag.bind(True)).run(\n workflow_id=\"checkpoint_whole_recovery\"\n )\n run_duration_whole = time.time() - start\n\n utils.set_global_mark()\n\n start = time.time()\n recovered = ray.get(workflow.resume(\"checkpoint_whole_recovery\"))\n recover_duration_whole = time.time() - start\n assert np.isclose(recovered, np.arange(SIZE).mean())\n\n print(\n f\"[whole] run_duration = {run_duration_whole}, \"\n f\"recover_duration = {recover_duration_whole}\"\n )\n\n\[email protected](\n reason=(\n \"Currently it is not clear how and if we need to check\"\n \"side effects of skipping checkpointing, e.g., the\"\n \"violation of exactly-once execution guarantee of workflow.\"\n )\n)\ndef test_checkpoint_dag_validation(workflow_start_regular):\n @ray.remote\n def identity(x):\n return x\n\n @ray.remote\n def average(x):\n return np.mean(x)\n\n @workflow.step\n def valid_checkpoint_dag_1():\n y = identity.options(checkpoint=False).step(42)\n return average.options(checkpoint=True).step(y)\n\n @workflow.step\n def invalid_checkpoint_dag_1():\n y = identity.options(checkpoint=True).step(42)\n return average.options(checkpoint=True).step(y)\n\n @workflow.step\n def invalid_checkpoint_dag_2():\n y = valid_checkpoint_dag_1.options(checkpoint=False).step()\n return 
average.options(checkpoint=True).step(y)\n\n valid_checkpoint_dag_1.options(checkpoint=False).step().run()\n # check invalid configuration\n with pytest.raises(RaySystemError):\n invalid_checkpoint_dag_1.options(checkpoint=False).step().run()\n # check invalid configuration\n with pytest.raises(RaySystemError):\n invalid_checkpoint_dag_2.options(checkpoint=False).step().run()\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n",
"import sys\nimport time\n\nimport ray\n\nimport pytest\nimport grpc\nfrom grpc._channel import _InactiveRpcError\nimport numpy as np\nimport psutil\nimport subprocess\n\nimport ray.ray_constants as ray_constants\n\nfrom ray.cluster_utils import Cluster, cluster_not_supported\nfrom ray import NodeID\nfrom ray.core.generated import node_manager_pb2\nfrom ray.core.generated import node_manager_pb2_grpc\nfrom ray.core.generated import gcs_service_pb2\nfrom ray.core.generated import gcs_service_pb2_grpc\nfrom ray._private.test_utils import (\n init_error_pubsub,\n get_error_message,\n run_string_as_driver,\n wait_for_condition,\n)\nfrom ray._private.test_utils import SignalActor\nfrom ray.exceptions import LocalRayletDiedError\nimport ray.experimental.internal_kv as internal_kv\n\n\ndef search_raylet(cluster):\n \"\"\"Return the number of running processes.\"\"\"\n raylets = []\n for node in cluster.list_all_nodes():\n procs = node.all_processes\n raylet_proc_info = procs.get(ray_constants.PROCESS_TYPE_RAYLET)\n if raylet_proc_info:\n assert len(raylet_proc_info) == 1\n raylet = psutil.Process(raylet_proc_info[0].process.pid)\n if raylet.status() == \"running\":\n raylets.append(psutil.Process(raylet_proc_info[0].process.pid))\n return raylets\n\n\ndef test_retry_system_level_error(ray_start_regular):\n @ray.remote\n class Counter:\n def __init__(self):\n self.value = 0\n\n def increment(self):\n self.value += 1\n return self.value\n\n @ray.remote(max_retries=1)\n def func(counter):\n count = counter.increment.remote()\n if ray.get(count) == 1:\n import os\n\n os._exit(0)\n else:\n return 1\n\n counter1 = Counter.remote()\n r1 = func.remote(counter1)\n assert ray.get(r1) == 1\n\n counter2 = Counter.remote()\n r2 = func.options(max_retries=0).remote(counter2)\n with pytest.raises(ray.exceptions.WorkerCrashedError):\n ray.get(r2)\n\n\ndef test_retry_application_level_error(ray_start_regular):\n @ray.remote\n class Counter:\n def __init__(self):\n self.value = 0\n\n def increment(self):\n self.value += 1\n return self.value\n\n @ray.remote(max_retries=1, retry_exceptions=True)\n def func(counter):\n count = counter.increment.remote()\n if ray.get(count) == 1:\n raise ValueError()\n else:\n return 2\n\n counter1 = Counter.remote()\n r1 = func.remote(counter1)\n assert ray.get(r1) == 2\n\n counter2 = Counter.remote()\n r2 = func.options(max_retries=0).remote(counter2)\n with pytest.raises(ValueError):\n ray.get(r2)\n\n counter3 = Counter.remote()\n r3 = func.options(retry_exceptions=False).remote(counter3)\n with pytest.raises(ValueError):\n ray.get(r3)\n\n\[email protected](cluster_not_supported, reason=\"cluster not supported\")\ndef test_connect_with_disconnected_node(shutdown_only):\n config = {\n \"num_heartbeats_timeout\": 50,\n \"raylet_heartbeat_period_milliseconds\": 10,\n }\n cluster = Cluster()\n cluster.add_node(num_cpus=0, _system_config=config)\n ray.init(address=cluster.address)\n p = init_error_pubsub()\n errors = get_error_message(p, 1, timeout=5)\n assert len(errors) == 0\n # This node will be killed by SIGKILL, ray_monitor will mark it to dead.\n dead_node = cluster.add_node(num_cpus=0)\n cluster.remove_node(dead_node, allow_graceful=False)\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)\n assert len(errors) == 1\n # This node is killed by SIGKILL, ray_monitor will mark it to dead.\n dead_node = cluster.add_node(num_cpus=0)\n cluster.remove_node(dead_node, allow_graceful=False)\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)\n assert 
len(errors) == 1\n # This node is killed by SIGTERM, ray_monitor will not mark it again.\n removing_node = cluster.add_node(num_cpus=0)\n cluster.remove_node(removing_node, allow_graceful=True)\n errors = get_error_message(p, 1, timeout=2)\n assert len(errors) == 0\n # There is no connection error to a dead node.\n errors = get_error_message(p, 1, timeout=2)\n assert len(errors) == 0\n p.close()\n\n\ndef test_detached_actor_ref(call_ray_start):\n address = call_ray_start\n\n driver_script = \"\"\"\nimport ray\nimport time\n\n\[email protected]\ndef foo(x):\n return ray.put(42)\n\n\[email protected]\nclass Actor:\n def __init__(self):\n self.ref = None\n\n def invoke(self):\n self.ref = foo.remote(0)\n # Wait for the task to finish before exiting the driver.\n ray.get(self.ref)\n\n def get(self):\n print(\"get\", self.ref)\n return self.ref\n\n\nif __name__ == \"__main__\":\n ray.init(address=\"{}\", namespace=\"default\")\n a = Actor.options(name=\"holder\", lifetime=\"detached\").remote()\n # Wait for the task to finish before exiting the driver.\n ray.get(a.invoke.remote())\n print(\"success\")\n\"\"\".format(\n address\n )\n\n out = run_string_as_driver(driver_script)\n assert \"success\" in out\n\n import time\n\n time.sleep(5)\n\n # connect to the cluster\n ray.init(address=address, namespace=\"default\")\n actor = ray.get_actor(\"holder\")\n x = actor.get.remote()\n while isinstance(x, ray.ObjectRef):\n x = ray.get(x)\n assert x == 42\n\n\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\[email protected](\"debug_enabled\", [False, True])\ndef test_object_lost_error(ray_start_cluster, debug_enabled):\n cluster = ray_start_cluster\n system_config = {\n \"num_heartbeats_timeout\": 3,\n }\n if debug_enabled:\n system_config[\"record_ref_creation_sites\"] = True\n cluster.add_node(num_cpus=0, _system_config=system_config)\n ray.init(address=cluster.address)\n worker_node = cluster.add_node(num_cpus=1)\n\n @ray.remote(num_cpus=1)\n class Actor:\n def __init__(self):\n return\n\n def foo(self):\n return \"x\" * 1000_000\n\n def done(self):\n return\n\n @ray.remote\n def borrower(ref):\n ray.get(ref[0])\n\n @ray.remote\n def task_arg(ref):\n return\n\n a = Actor.remote()\n x = a.foo.remote()\n ray.get(a.done.remote())\n cluster.remove_node(worker_node, allow_graceful=False)\n cluster.add_node(num_cpus=1)\n\n y = borrower.remote([x])\n\n try:\n ray.get(x)\n assert False\n except ray.exceptions.ObjectLostError as e:\n error = str(e)\n print(error)\n assert (\"actor call\" in error) == debug_enabled\n assert (\"test_object_lost_error\" in error) == debug_enabled\n\n try:\n ray.get(y)\n assert False\n except ray.exceptions.RayTaskError as e:\n error = str(e)\n print(error)\n assert (\"actor call\" in error) == debug_enabled\n assert (\"test_object_lost_error\" in error) == debug_enabled\n\n try:\n ray.get(task_arg.remote(x))\n except ray.exceptions.RayTaskError as e:\n error = str(e)\n print(error)\n assert (\"actor call\" in error) == debug_enabled\n assert (\"test_object_lost_error\" in error) == debug_enabled\n\n\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\[email protected](\n \"ray_start_cluster_head\",\n [\n {\n \"num_cpus\": 0,\n \"_system_config\": {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_period_milliseconds\": 100,\n },\n }\n ],\n indirect=True,\n)\ndef test_raylet_graceful_shutdown_through_rpc(ray_start_cluster_head, error_pubsub):\n \"\"\"\n Prepare the cluster.\n \"\"\"\n cluster = ray_start_cluster_head\n 
head_node_port = None\n for n in ray.nodes():\n head_node_port = int(n[\"NodeManagerPort\"])\n worker = cluster.add_node(num_cpus=1)\n cluster.wait_for_nodes()\n worker_node_port = None\n for n in ray.nodes():\n port = int(n[\"NodeManagerPort\"])\n if port != head_node_port and n[\"alive\"]:\n worker_node_port = port\n \"\"\"\n warm up the cluster\n \"\"\"\n\n @ray.remote\n def f():\n pass\n\n ray.get(f.remote())\n\n # Kill a raylet gracefully.\n def kill_raylet(ip, port, graceful=True):\n raylet_address = f\"{ip}:{port}\"\n channel = grpc.insecure_channel(raylet_address)\n stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)\n print(f\"Sending a shutdown request to {ip}:{port}\")\n try:\n stub.ShutdownRaylet(\n node_manager_pb2.ShutdownRayletRequest(graceful=graceful)\n )\n except _InactiveRpcError:\n assert not graceful\n\n \"\"\"\n Kill the first worker non-gracefully.\n \"\"\"\n ip = worker.node_ip_address\n kill_raylet(ip, worker_node_port, graceful=False)\n p = error_pubsub\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=10)\n # Should print the heartbeat messages.\n assert \"has missed too many heartbeats from it\" in errors[0].error_message\n # NOTE the killed raylet is a zombie since the\n # parent process (the pytest script) hasn't called wait syscall.\n # For normal scenarios where raylet is created by\n # ray start, this issue won't exist.\n try:\n wait_for_condition(lambda: len(search_raylet(cluster)) == 1)\n except Exception:\n print(\"More than one raylets are detected.\")\n print(search_raylet(cluster))\n \"\"\"\n Kill the second worker gracefully.\n \"\"\"\n worker = cluster.add_node(num_cpus=0)\n worker_node_port = None\n for n in ray.nodes():\n port = int(n[\"NodeManagerPort\"])\n if port != head_node_port and n[\"alive\"]:\n worker_node_port = port\n # Kill the second worker gracefully.\n ip = worker.node_ip_address\n kill_raylet(ip, worker_node_port, graceful=True)\n p = error_pubsub\n # Error shouldn't be printed to the driver.\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)\n # Error messages shouldn't be published.\n assert len(errors) == 0\n try:\n wait_for_condition(lambda: len(search_raylet(cluster)) == 1)\n except Exception:\n print(\"More than one raylets are detected.\")\n print(search_raylet(cluster))\n \"\"\"\n Make sure head node is not dead.\n \"\"\"\n ray.get(f.options(num_cpus=0).remote())\n\n\[email protected](sys.platform == \"win32\", reason=\"Failing on Windows.\")\[email protected](\n \"ray_start_cluster_head\",\n [\n {\n \"num_cpus\": 0,\n \"_system_config\": {\n \"num_heartbeats_timeout\": 10,\n \"raylet_heartbeat_period_milliseconds\": 100,\n },\n }\n ],\n indirect=True,\n)\ndef test_gcs_drain(ray_start_cluster_head, error_pubsub):\n \"\"\"\n Prepare the cluster.\n \"\"\"\n cluster = ray_start_cluster_head\n head_node_id = ray.nodes()[0][\"NodeID\"]\n NUM_NODES = 2\n for _ in range(NUM_NODES):\n cluster.add_node(num_cpus=1)\n worker_node_ids = []\n for n in ray.nodes():\n if n[\"NodeID\"] != head_node_id:\n worker_node_ids.append(n[\"NodeID\"])\n \"\"\"\n Warm up the cluster.\n \"\"\"\n\n @ray.remote(num_cpus=1)\n class A:\n def ready(self):\n pass\n\n actors = [A.remote() for _ in range(NUM_NODES)]\n ray.get([actor.ready.remote() for actor in actors])\n \"\"\"\n Test batch drain.\n \"\"\"\n # Prepare requests.\n gcs_server_addr = cluster.gcs_address\n options = ray_constants.GLOBAL_GRPC_OPTIONS\n channel = grpc.insecure_channel(gcs_server_addr, options)\n stub = 
gcs_service_pb2_grpc.NodeInfoGcsServiceStub(channel)\n r = gcs_service_pb2.DrainNodeRequest()\n for worker_id in worker_node_ids:\n data = r.drain_node_data.add()\n data.node_id = NodeID.from_hex(worker_id).binary()\n stub.DrainNode(r)\n\n p = error_pubsub\n # Error shouldn't be printed to the driver.\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)\n assert len(errors) == 0\n # There should be only a head node since we drained worker nodes.\n # NOTE: In the current implementation we kill nodes when draining them.\n # This check should be removed once we implement\n # the proper drain behavior.\n try:\n wait_for_condition(lambda: len(search_raylet(cluster)) == 1)\n except Exception:\n print(\"More than one raylets are detected.\")\n print(search_raylet(cluster))\n \"\"\"\n Make sure the API is idempotent.\n \"\"\"\n for _ in range(10):\n stub.DrainNode(r)\n p = error_pubsub\n # Error shouldn't be printed to the driver.\n errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)\n assert len(errors) == 0\n \"\"\"\n Make sure the GCS states are updated properly.\n \"\"\"\n for n in ray.nodes():\n node_id = n[\"NodeID\"]\n is_alive = n[\"Alive\"]\n if node_id == head_node_id:\n assert is_alive\n if node_id in worker_node_ids:\n assert not is_alive\n \"\"\"\n Make sure head node is not dead and functional.\n \"\"\"\n a = A.options(num_cpus=0).remote()\n ray.get(a.ready.remote())\n\n\ndef test_worker_start_timeout(monkeypatch, ray_start_cluster):\n # This test is to make sure\n # 1. when worker failed to register, raylet will print useful log\n # 2. raylet will kill hanging worker\n with monkeypatch.context() as m:\n # this delay will make worker start slow\n m.setenv(\n \"RAY_testing_asio_delay_us\",\n \"InternalKVGcsService.grpc_server.InternalKVGet=2000000:2000000\",\n )\n m.setenv(\"RAY_worker_register_timeout_seconds\", \"1\")\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=4, object_store_memory=1e9)\n script = \"\"\"\nimport ray\nray.init(address='auto')\n\[email protected]\ndef task():\n return None\n\nray.get(task.remote(), timeout=3)\n\"\"\"\n with pytest.raises(subprocess.CalledProcessError) as e:\n run_string_as_driver(script)\n\n # make sure log is correct\n assert (\n \"The process is still alive, probably it's hanging during start\"\n ) in e.value.output.decode()\n # worker will be killed so it won't try to register to raylet\n assert (\n \"Received a register request from an unknown worker shim process\"\n ) not in e.value.output.decode()\n\n\ndef test_task_failure_when_driver_local_raylet_dies(ray_start_cluster):\n cluster = ray_start_cluster\n head = cluster.add_node(num_cpus=4, resources={\"foo\": 1})\n cluster.wait_for_nodes()\n ray.init(address=cluster.address)\n\n @ray.remote(resources={\"foo\": 1})\n def func():\n internal_kv._internal_kv_put(\"test_func\", \"func\")\n while True:\n time.sleep(1)\n\n func.remote()\n while not internal_kv._internal_kv_exists(\"test_func\"):\n time.sleep(0.1)\n\n # The lease request should wait inside raylet\n # since there is no available resources.\n ret = func.remote()\n # Waiting for the lease request to reach raylet.\n time.sleep(1)\n head.kill_raylet()\n with pytest.raises(LocalRayletDiedError):\n ray.get(ret)\n\n\ndef test_locality_aware_scheduling_for_dead_nodes(shutdown_only):\n \"\"\"Test that locality-ware scheduling can handle dead nodes.\"\"\"\n # Create a cluster with 4 nodes.\n config = {\n \"num_heartbeats_timeout\": 5,\n \"raylet_heartbeat_period_milliseconds\": 
50,\n }\n cluster = Cluster()\n cluster.add_node(num_cpus=4, resources={\"node1\": 1}, _system_config=config)\n cluster.wait_for_nodes()\n ray.init(address=cluster.address)\n\n node2 = cluster.add_node(num_cpus=4, resources={\"node2\": 1})\n node3 = cluster.add_node(num_cpus=4, resources={\"node3\": 1})\n node4 = cluster.add_node(num_cpus=4, resources={\"node4\": 1})\n cluster.wait_for_nodes()\n\n # Create 2 objects on node 2.\n @ray.remote(resources={\"node2\": 0.1})\n def create_object():\n return np.zeros(10 * 1024 * 1024, dtype=np.uint8)\n\n obj1 = create_object.remote()\n obj2 = create_object.remote()\n\n # Push these 2 objects to other nodes.\n # node2 will have obj1 and obj2.\n # node3 will have obj1.\n # node4 will have obj2.\n @ray.remote\n class MyActor:\n def __init__(self, obj_refs):\n # Note, we need to keep obj_refs to prevent the objects from\n # being garbage collected.\n self.obj_refs = obj_refs\n self.obj = ray.get(obj_refs)\n\n def ready(self):\n return True\n\n actors = [\n MyActor.options(resources={\"node2\": 0.1}).remote([obj1, obj2]),\n MyActor.options(resources={\"node3\": 0.1}).remote([obj1]),\n MyActor.options(resources={\"node4\": 0.1}).remote([obj2]),\n ]\n\n assert all(ray.get(actor.ready.remote()) is True for actor in actors)\n\n # This function requires obj1 and obj2.\n @ray.remote\n def func(obj1, obj2):\n return ray.worker.global_worker.node.unique_id\n\n # This function should be scheduled to node2. As node2 has both objects.\n assert ray.get(func.remote(obj1, obj2)) == node2.unique_id\n\n # Kill node2, and re-schedule the function.\n # It should be scheduled to either node3 or node4.\n node2.kill_raylet()\n # Waits for the driver to receive the NodeRemoved notification.\n time.sleep(1)\n target_node = ray.get(func.remote(obj1, obj2))\n assert target_node == node3.unique_id or target_node == node4.unique_id\n\n\ndef test_actor_task_fast_fail(ray_start_cluster):\n # Explicitly set `max_task_retries=0` here to show the test scenario.\n @ray.remote(max_restarts=1, max_task_retries=0)\n class SlowActor:\n def __init__(self, signal_actor):\n if ray.get_runtime_context().was_current_actor_reconstructed:\n ray.get(signal_actor.wait.remote())\n\n def ping(self):\n return \"pong\"\n\n signal = SignalActor.remote()\n actor = SlowActor.remote(signal)\n ray.get(actor.ping.remote())\n ray.kill(actor, no_restart=False)\n\n # Wait for a while so that now the driver knows the actor is in\n # RESTARTING state.\n time.sleep(1)\n # An actor task should fail quickly until the actor is restarted if\n # `max_task_retries` is 0.\n with pytest.raises(ray.exceptions.RayActorError):\n ray.get(actor.ping.remote())\n\n signal.send.remote()\n # Wait for a while so that now the driver knows the actor is in\n # ALIVE state.\n time.sleep(1)\n # An actor task should succeed.\n ray.get(actor.ping.remote())\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([\"-v\", __file__]))\n",
"import gym\nfrom gym.spaces import Discrete, MultiDiscrete\nimport numpy as np\nimport tree # pip install dm_tree\nfrom typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union\n\nfrom ray.rllib.utils.annotations import PublicAPI\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.spaces.space_utils import get_base_struct_from_space\nfrom ray.rllib.utils.typing import (\n LocalOptimizer,\n ModelGradients,\n PartialTrainerConfigDict,\n SpaceStruct,\n TensorStructType,\n TensorType,\n)\n\nif TYPE_CHECKING:\n from ray.rllib.policy.tf_policy import TFPolicy\n\ntf1, tf, tfv = try_import_tf()\n\n\n@PublicAPI\ndef explained_variance(y: TensorType, pred: TensorType) -> TensorType:\n \"\"\"Computes the explained variance for a pair of labels and predictions.\n\n The formula used is:\n max(-1.0, 1.0 - (std(y - pred)^2 / std(y)^2))\n\n Args:\n y: The labels.\n pred: The predictions.\n\n Returns:\n The explained variance given a pair of labels and predictions.\n \"\"\"\n _, y_var = tf.nn.moments(y, axes=[0])\n _, diff_var = tf.nn.moments(y - pred, axes=[0])\n return tf.maximum(-1.0, 1 - (diff_var / y_var))\n\n\n@PublicAPI\ndef flatten_inputs_to_1d_tensor(\n inputs: TensorStructType,\n spaces_struct: Optional[SpaceStruct] = None,\n time_axis: bool = False,\n) -> TensorType:\n \"\"\"Flattens arbitrary input structs according to the given spaces struct.\n\n Returns a single 1D tensor resulting from the different input\n components' values.\n\n Thereby:\n - Boxes (any shape) get flattened to (B, [T]?, -1). Note that image boxes\n are not treated differently from other types of Boxes and get\n flattened as well.\n - Discrete (int) values are one-hot'd, e.g. a batch of [1, 0, 3] (B=3 with\n Discrete(4) space) results in [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]].\n - MultiDiscrete values are multi-one-hot'd, e.g. a batch of\n [[0, 2], [1, 4]] (B=2 with MultiDiscrete([2, 5]) space) results in\n [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]].\n\n Args:\n inputs: The inputs to be flattened.\n spaces_struct: The structure of the spaces that behind the input\n time_axis: Whether all inputs have a time-axis (after the batch axis).\n If True, will keep not only the batch axis (0th), but the time axis\n (1st) as-is and flatten everything from the 2nd axis up.\n\n Returns:\n A single 1D tensor resulting from concatenating all\n flattened/one-hot'd input components. Depending on the time_axis flag,\n the shape is (B, n) or (B, T, n).\n\n Examples:\n >>> # B=2\n >>> from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor\n >>> from gym.spaces import Discrete, Box\n >>> out = flatten_inputs_to_1d_tensor( # doctest: +SKIP\n ... {\"a\": [1, 0], \"b\": [[[0.0], [0.1]], [1.0], [1.1]]},\n ... spaces_struct=dict(a=Discrete(2), b=Box(shape=(2, 1)))\n ... ) # doctest: +SKIP\n >>> print(out) # doctest: +SKIP\n [[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]] # B=2 n=4\n\n >>> # B=2; T=2\n >>> out = flatten_inputs_to_1d_tensor( # doctest: +SKIP\n ... ([[1, 0], [0, 1]],\n ... [[[0.0, 0.1], [1.0, 1.1]], [[2.0, 2.1], [3.0, 3.1]]]),\n ... spaces_struct=tuple([Discrete(2), Box(shape=(2, ))]),\n ... time_axis=True\n ... 
) # doctest: +SKIP\n >>> print(out) # doctest: +SKIP\n [[[0.0, 1.0, 0.0, 0.1], [1.0, 0.0, 1.0, 1.1]],\\\n [[1.0, 0.0, 2.0, 2.1], [0.0, 1.0, 3.0, 3.1]]] # B=2 T=2 n=4\n \"\"\"\n\n flat_inputs = tree.flatten(inputs)\n flat_spaces = (\n tree.flatten(spaces_struct)\n if spaces_struct is not None\n else [None] * len(flat_inputs)\n )\n\n B = None\n T = None\n out = []\n for input_, space in zip(flat_inputs, flat_spaces):\n input_ = tf.convert_to_tensor(input_)\n shape = tf.shape(input_)\n # Store batch and (if applicable) time dimension.\n if B is None:\n B = shape[0]\n if time_axis:\n T = shape[1]\n\n # One-hot encoding.\n if isinstance(space, Discrete):\n if time_axis:\n input_ = tf.reshape(input_, [B * T])\n out.append(tf.cast(one_hot(input_, space), tf.float32))\n elif isinstance(space, MultiDiscrete):\n if time_axis:\n input_ = tf.reshape(input_, [B * T, -1])\n out.append(tf.cast(one_hot(input_, space), tf.float32))\n # Flatten.\n else:\n if time_axis:\n input_ = tf.reshape(input_, [B * T, -1])\n else:\n input_ = tf.reshape(input_, [B, -1])\n out.append(tf.cast(input_, tf.float32))\n\n merged = tf.concat(out, axis=-1)\n # Restore the time-dimension, if applicable.\n if time_axis:\n merged = tf.reshape(merged, [B, T, -1])\n\n return merged\n\n\n@PublicAPI\ndef get_gpu_devices() -> List[str]:\n \"\"\"Returns a list of GPU device names, e.g. [\"/gpu:0\", \"/gpu:1\"].\n\n Supports both tf1.x and tf2.x.\n\n Returns:\n List of GPU device names (str).\n \"\"\"\n if tfv == 1:\n from tensorflow.python.client import device_lib\n\n devices = device_lib.list_local_devices()\n else:\n try:\n devices = tf.config.list_physical_devices()\n except Exception:\n devices = tf.config.experimental.list_physical_devices()\n\n # Expect \"GPU\", but also stuff like: \"XLA_GPU\".\n return [d.name for d in devices if \"GPU\" in d.device_type]\n\n\n@PublicAPI\ndef get_placeholder(\n *,\n space: Optional[gym.Space] = None,\n value: Optional[Any] = None,\n name: Optional[str] = None,\n time_axis: bool = False,\n flatten: bool = True\n) -> \"tf1.placeholder\":\n \"\"\"Returns a tf1.placeholder object given optional hints, such as a space.\n\n Note that the returned placeholder will always have a leading batch\n dimension (None).\n\n Args:\n space: An optional gym.Space to hint the shape and dtype of the\n placeholder.\n value: An optional value to hint the shape and dtype of the\n placeholder.\n name: An optional name for the placeholder.\n time_axis: Whether the placeholder should also receive a time\n dimension (None).\n flatten: Whether to flatten the given space into a plain Box space\n and then create the placeholder from the resulting space.\n\n Returns:\n The tf1 placeholder.\n \"\"\"\n from ray.rllib.models.catalog import ModelCatalog\n\n if space is not None:\n if isinstance(space, (gym.spaces.Dict, gym.spaces.Tuple)):\n if flatten:\n return ModelCatalog.get_action_placeholder(space, None)\n else:\n return tree.map_structure_with_path(\n lambda path, component: get_placeholder(\n space=component,\n name=name + \".\" + \".\".join([str(p) for p in path]),\n ),\n get_base_struct_from_space(space),\n )\n return tf1.placeholder(\n shape=(None,) + ((None,) if time_axis else ()) + space.shape,\n dtype=tf.float32 if space.dtype == np.float64 else space.dtype,\n name=name,\n )\n else:\n assert value is not None\n shape = value.shape[1:]\n return tf1.placeholder(\n shape=(None,)\n + ((None,) if time_axis else ())\n + (shape if isinstance(shape, tuple) else tuple(shape.as_list())),\n dtype=tf.float32 if value.dtype == 
np.float64 else value.dtype,\n            name=name,\n        )\n\n\n@PublicAPI\ndef get_tf_eager_cls_if_necessary(\n    orig_cls: Type[\"TFPolicy\"], config: PartialTrainerConfigDict\n) -> Type[\"TFPolicy\"]:\n    \"\"\"Returns the corresponding tf-eager class for a given TFPolicy class.\n\n    Args:\n        orig_cls: The original TFPolicy class to get the corresponding tf-eager\n            class for.\n        config: The Trainer config dict.\n\n    Returns:\n        The tf eager policy class corresponding to the given TFPolicy class.\n    \"\"\"\n    cls = orig_cls\n    framework = config.get(\"framework\", \"tf\")\n\n    if framework in [\"tf2\", \"tf\", \"tfe\"] and not tf1:\n        raise ImportError(\"Could not import tensorflow!\")\n\n    if framework in [\"tf2\", \"tfe\"]:\n        assert tf1.executing_eagerly()\n\n        from ray.rllib.policy.tf_policy import TFPolicy\n        from ray.rllib.policy.eager_tf_policy import EagerTFPolicy\n        from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2\n\n        # Create eager-class (if not already one).\n        if hasattr(orig_cls, \"as_eager\") and not issubclass(orig_cls, EagerTFPolicy):\n            cls = orig_cls.as_eager()\n        # Could be some other type of policy or already\n        # eager-ized.\n        elif not issubclass(orig_cls, TFPolicy):\n            pass\n        else:\n            raise ValueError(\n                \"This policy does not support eager \" \"execution: {}\".format(orig_cls)\n            )\n\n    # Now that we know the policy is an eager one, add tracing, if necessary.\n    if config.get(\"eager_tracing\") and issubclass(\n        cls, (EagerTFPolicy, EagerTFPolicyV2)\n    ):\n        cls = cls.with_tracing()\n    return cls\n\n\n@PublicAPI\ndef huber_loss(x: TensorType, delta: float = 1.0) -> TensorType:\n    \"\"\"Computes the huber loss for a given term and delta parameter.\n\n    Reference: https://en.wikipedia.org/wiki/Huber_loss\n    Note that the factor of 0.5 is implicitly included in the calculation.\n\n    Formula:\n        L = 0.5 * x^2 for small abs x (delta threshold)\n        L = delta * (abs(x) - 0.5*delta) for larger abs x (delta threshold)\n\n    Args:\n        x: The input term, e.g. a TD error.\n        delta: The delta parameter in the above formula.\n\n    Returns:\n        The Huber loss resulting from `x` and `delta`.\n    \"\"\"\n    return tf.where(\n        tf.abs(x) < delta,  # for small x -> apply the Huber correction\n        tf.math.square(x) * 0.5,\n        delta * (tf.abs(x) - 0.5 * delta),\n    )\n\n\n@PublicAPI\ndef make_tf_callable(\n    session_or_none: Optional[\"tf1.Session\"], dynamic_shape: bool = False\n) -> Callable:\n    \"\"\"Returns a function that can be executed in either graph or eager mode.\n\n    The function must take only positional args.\n\n    If eager is enabled, this will act as just a function. Otherwise, it\n    will build a function that executes a session run with placeholders\n    internally.\n\n    Args:\n        session_or_none: tf.Session if in graph mode, else None.\n        dynamic_shape: True if the placeholders should have a dynamic\n            batch dimension. Otherwise they will be fixed shape.\n\n    Returns:\n        A function that can be called in either eager or static-graph mode.\n    \"\"\"\n\n    if tf.executing_eagerly():\n        assert session_or_none is None\n    else:\n        assert session_or_none is not None\n\n    def make_wrapper(fn):\n        # Static-graph mode: Create placeholders and make a session call each\n        # time the wrapped function is called. 
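Placeholders are built once on the\n        # first call and reused on subsequent calls. 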
Returns the output of this\n # session call.\n if session_or_none is not None:\n args_placeholders = []\n kwargs_placeholders = {}\n\n symbolic_out = [None]\n\n def call(*args, **kwargs):\n args_flat = []\n for a in args:\n if type(a) is list:\n args_flat.extend(a)\n else:\n args_flat.append(a)\n args = args_flat\n\n # We have not built any placeholders yet: Do this once here,\n # then reuse the same placeholders each time we call this\n # function again.\n if symbolic_out[0] is None:\n with session_or_none.graph.as_default():\n\n def _create_placeholders(path, value):\n if dynamic_shape:\n if len(value.shape) > 0:\n shape = (None,) + value.shape[1:]\n else:\n shape = ()\n else:\n shape = value.shape\n return tf1.placeholder(\n dtype=value.dtype,\n shape=shape,\n name=\".\".join([str(p) for p in path]),\n )\n\n placeholders = tree.map_structure_with_path(\n _create_placeholders, args\n )\n for ph in tree.flatten(placeholders):\n args_placeholders.append(ph)\n\n placeholders = tree.map_structure_with_path(\n _create_placeholders, kwargs\n )\n for k, ph in placeholders.items():\n kwargs_placeholders[k] = ph\n\n symbolic_out[0] = fn(*args_placeholders, **kwargs_placeholders)\n feed_dict = dict(zip(args_placeholders, tree.flatten(args)))\n tree.map_structure(\n lambda ph, v: feed_dict.__setitem__(ph, v),\n kwargs_placeholders,\n kwargs,\n )\n ret = session_or_none.run(symbolic_out[0], feed_dict)\n return ret\n\n return call\n # Eager mode (call function as is).\n else:\n return fn\n\n return make_wrapper\n\n\n@PublicAPI\ndef minimize_and_clip(\n optimizer: LocalOptimizer,\n objective: TensorType,\n var_list: List[\"tf.Variable\"],\n clip_val: float = 10.0,\n) -> ModelGradients:\n \"\"\"Computes, then clips gradients using objective, optimizer and var list.\n\n Ensures the norm of the gradients for each variable is clipped to\n `clip_val`.\n\n Args:\n optimizer: Either a shim optimizer (tf eager) containing a\n tf.GradientTape under `self.tape` or a tf1 local optimizer\n object.\n objective: The loss tensor to calculate gradients on.\n var_list: The list of tf.Variables to compute gradients over.\n clip_val: The global norm clip value. 
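Note that clipping is applied to each\n            gradient separately, via tf.clip_by_norm. 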
Will clip around -clip_val and\n            +clip_val.\n\n    Returns:\n        The resulting model gradients (list or tuples of grads + vars)\n        corresponding to the input `var_list`.\n    \"\"\"\n    # Accidentally passing values < 0.0 will break all gradients.\n    assert clip_val is None or clip_val > 0.0, clip_val\n\n    if tf.executing_eagerly():\n        tape = optimizer.tape\n        grads_and_vars = list(zip(list(tape.gradient(objective, var_list)), var_list))\n    else:\n        grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)\n\n    return [\n        (tf.clip_by_norm(g, clip_val) if clip_val is not None else g, v)\n        for (g, v) in grads_and_vars\n        if g is not None\n    ]\n\n\n@PublicAPI\ndef one_hot(x: TensorType, space: gym.Space) -> TensorType:\n    \"\"\"Returns a one-hot tensor, given an int tensor and a space.\n\n    Handles the MultiDiscrete case as well.\n\n    Args:\n        x: The input tensor.\n        space: The space to use for generating the one-hot tensor.\n\n    Returns:\n        The resulting one-hot tensor.\n\n    Raises:\n        ValueError: If the given space is not a discrete one.\n\n    Examples:\n        >>> import gym\n        >>> import tensorflow as tf\n        >>> from ray.rllib.utils.tf_utils import one_hot\n        >>> x = tf.Variable([0, 3], dtype=tf.int32)  # batch-dim=2\n        >>> # Discrete space with 4 (one-hot) slots per batch item.\n        >>> s = gym.spaces.Discrete(4)\n        >>> one_hot(x, s) # doctest: +SKIP\n        <tf.Tensor 'one_hot:0' shape=(2, 4) dtype=float32>\n        >>> x = tf.Variable([[0, 1, 2, 3]], dtype=tf.int32)  # batch-dim=1\n        >>> # MultiDiscrete space with 5 + 4 + 4 + 7 = 20 (one-hot) slots\n        >>> # per batch item.\n        >>> s = gym.spaces.MultiDiscrete([5, 4, 4, 7])\n        >>> one_hot(x, s) # doctest: +SKIP\n        <tf.Tensor 'concat:0' shape=(1, 20) dtype=float32>\n    \"\"\"\n    if isinstance(space, Discrete):\n        return tf.one_hot(x, space.n, dtype=tf.float32)\n    elif isinstance(space, MultiDiscrete):\n        return tf.concat(\n            [\n                tf.one_hot(x[:, i], n, dtype=tf.float32)\n                for i, n in enumerate(space.nvec)\n            ],\n            axis=-1,\n        )\n    else:\n        raise ValueError(\"Unsupported space for `one_hot`: {}\".format(space))\n\n\n@PublicAPI\ndef reduce_mean_ignore_inf(x: TensorType, axis: Optional[int] = None) -> TensorType:\n    \"\"\"Same as tf.reduce_mean() but ignores -inf values.\n\n    Args:\n        x: The input tensor to reduce mean over.\n        axis: The axis over which to reduce. None for all axes.\n\n    Returns:\n        The mean reduced inputs, ignoring inf values.\n    \"\"\"\n    mask = tf.not_equal(x, tf.float32.min)\n    x_zeroed = tf.where(mask, x, tf.zeros_like(x))\n    return tf.math.reduce_sum(x_zeroed, axis) / tf.math.reduce_sum(\n        tf.cast(mask, tf.float32), axis\n    )\n\n\n@PublicAPI\ndef scope_vars(\n    scope: Union[str, \"tf1.VariableScope\"], trainable_only: bool = False\n) -> List[\"tf.Variable\"]:\n    \"\"\"Get variables inside a given scope.\n\n    Args:\n        scope: Scope in which the variables reside.\n        trainable_only: Whether or not to return only the variables that were\n            marked as trainable.\n\n    Returns:\n        The list of variables in the given `scope`.\n    \"\"\"\n    return tf1.get_collection(\n        tf1.GraphKeys.TRAINABLE_VARIABLES\n        if trainable_only\n        else tf1.GraphKeys.VARIABLES,\n        scope=scope if isinstance(scope, str) else scope.name,\n    )\n\n\n@PublicAPI\ndef zero_logps_from_actions(actions: TensorStructType) -> TensorType:\n    \"\"\"Helper function useful for returning dummy logp's (0) for some actions.\n\n    Args:\n        actions: The input actions. This can be any struct\n            of complex action components or a simple tensor of different\n            dimensions, e.g. 
[B], [B, 2], or {\"a\": [B, 4, 5], \"b\": [B]}.\n\n Returns:\n A 1D tensor of 0.0 (dummy logp's) matching the batch\n dim of `actions` (shape=[B]).\n \"\"\"\n # Need to flatten `actions` in case we have a complex action space.\n # Take the 0th component to extract the batch dim.\n action_component = tree.flatten(actions)[0]\n logp_ = tf.zeros_like(action_component, dtype=tf.float32)\n # Logp's should be single values (but with the same batch dim as\n # `deterministic_actions` or `stochastic_actions`). In case\n # actions are just [B], zeros_like works just fine here, but if\n # actions are [B, ...], we have to reduce logp back to just [B].\n while len(logp_.shape) > 1:\n logp_ = logp_[:, 0]\n return logp_\n"
] | [
[
"numpy.arange",
"numpy.mean"
],
[
"numpy.zeros"
],
[
"tensorflow.python.client.device_lib.list_local_devices"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
jaayeon/emotion_classification | [
"5514360803b80f1b3dc607c077a14785d9a01669",
"5514360803b80f1b3dc607c077a14785d9a01669"
] | [
"Resnet_GRU/data_loader.py",
"VGG_GRU/main.py"
] | [
"#!/usr/bin/python\r\n# encoding: utf-8\r\n\r\nimport glob\r\nimport os\r\nimport random\r\nimport torch\r\nimport numpy as np\r\nfrom torch.utils import data\r\nimport cv2\r\nfrom PIL import Image\r\nfrom utils import *\r\nfrom torchvision import transforms\r\nimport random\r\nimport numpy as np\r\nimport random\r\n\r\nclass FER(data.Dataset):\r\n def __init__(self, opt, mode):\r\n\r\n self.opt = opt\r\n self.mode = mode\r\n self.img_size = opt.img_size\r\n self.length = opt.length\r\n self.iter = opt.iter\r\n # self.img_list = glob.glob(os.path.join(opt.train_dir,'**', '*.jpg'), recursive=True)\r\n if self.mode == 'train':\r\n img_list_0 = glob.glob(os.path.join(opt.train_dir, 'Not_Understand', '*.jpg'))\r\n img_list_1 = glob.glob(os.path.join(opt.train_dir, 'Neutral', '*.jpg'))\r\n img_list_2 = glob.glob(os.path.join(opt.train_dir, 'Understand', '*.jpg'))\r\n elif self.mode == 'valid':\r\n img_list_0 = glob.glob(os.path.join(opt.valid_dir, 'Not_Understand', '*.jpg'))\r\n img_list_1 = glob.glob(os.path.join(opt.valid_dir, 'Neutral', '*.jpg'))\r\n img_list_2 = glob.glob(os.path.join(opt.valid_dir, 'Understand', '*.jpg'))\r\n self.img_list_0 = sorted(img_list_0)\r\n self.img_list_1 = sorted(img_list_1)\r\n self.img_list_2 = sorted(img_list_2)\r\n self.len0 = len(self.img_list_0)\r\n self.len1 = len(self.img_list_1)\r\n self.len2 = len(self.img_list_2)\r\n # print('Number of each class images >> len0 : {}, len1 : {}, len2 : {}'.format(self.len0, self.len1, self.len2))\r\n\r\n\r\n def __getitem__(self, index): #L,C,H,W\r\n \r\n r = np.random.randint(9)\r\n img_path = []\r\n seq = np.zeros((self.length, 1, self.img_size, self.img_size))\r\n\r\n if self.mode =='train' or self.mode =='valid':\r\n if (r%3) == 0: img_list = self.img_list_0; num = self.len0\r\n elif (r%3) == 1: img_list = self.img_list_1; num = self.len1\r\n else : img_list = self.img_list_2; num = self.len2\r\n\r\n idx =random.sample(range(num), self.length)\r\n for i, img_num in enumerate(idx) : \r\n img_path.append(img_list[img_num])\r\n img = cv2.imread(img_list[img_num], cv2.IMREAD_GRAYSCALE)\r\n aug_img = self.transform(img)\r\n # print('aug_img.shape :',aug_img.shape)\r\n seq[i, :, :, :] = aug_img\r\n\r\n seq = torch.from_numpy(seq).float()\r\n label=int(img_path[0].split('_')[-1].split('.')[0]) #0-not understand ,1-neutral ,2-understand\r\n label = torch.LongTensor([label])\r\n # print('FER/ img_path : {}, label : {}'.format(img_path[0].split('\\\\')[-1], label))\r\n return seq, label\r\n\r\n else :\r\n img=self.get_real_data()\r\n return img\r\n\r\n def __len__(self):\r\n if self.mode == 'train':\r\n if self.iter:\r\n return self.iter\r\n else : \r\n return int(((self.len0 + self.len1 + self.len2)))\r\n elif self.mode == 'valid': #valid는 iter에 상관없이 항상 모든 데이터 보게끔\r\n return int(((self.len0 + self.len1 + self.len2)))\r\n\r\n\r\n\r\n def transform(self, img):\r\n ndim = img.ndim\r\n # print('data_loader img.shape : ',img.shape)\r\n if ndim==2:\r\n img = np.expand_dims(img, axis=0)\r\n else :\r\n h,w,c=img.shape\r\n if c==3:\r\n #color to gray\r\n pass\r\n aug_img = self.augment(img)\r\n # print('data_loader aug_img.shape : ',aug_img.shape)\r\n return aug_img\r\n\r\n\r\n def augment(self, img, hflip=True, rot=True): #c,h,w\r\n hflip = hflip and random.random() < 0.5\r\n vflip = rot and random.random() < 0.5\r\n rot90 = rot and random.random() < 0.5\r\n\r\n if hflip: img=img[:,:,::-1].copy()\r\n if vflip: img=img[:,::-1,:].copy()\r\n if rot90: img=img.transpose(0,2,1).copy()\r\n\r\n return img\r\n\r\n\r\n\r\n #for 
real-time\r\n def face_detection(self):\r\n #face detect\r\n #to gray scale\r\n pass\r\n\r\n #for real-time\r\n def get_real_data(self):\r\n img_shape=(1,self.img_size, self.img_size)\r\n crop_img=self.face_detection()\r\n #resize\r\n resize_img=np.resize(crop_img, img_shape)\r\n aug_img = self.augment(resize_img)\r\n return aug_img\r\n\r\n\r\n\r\ndef get_dataloader(opt, mode):\r\n\r\n dataset = FER(opt, mode)\r\n length = len(dataset)\r\n\r\n # print('Length of {} dataloader : {}'.format(opt.mode, length))\r\n if mode == 'train':\r\n dataloader = data.DataLoader(dataset=dataset,\r\n batch_size=1,\r\n shuffle=True,\r\n pin_memory=True,\r\n num_workers=opt.num_workers)\r\n elif mode == 'valid':\r\n dataloader = data.DataLoader(dataset=dataset,\r\n batch_size=opt.batch_size,\r\n shuffle=False,\r\n pin_memory=True,\r\n num_workers=opt.num_workers)\r\n \r\n return dataloader",
"## cuda 사용\r\n\r\nfrom __future__ import print_function\t# for compatibility\r\nimport os\r\nimport time\r\nimport numpy\r\nimport random\r\nimport dataset\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport torch.backends.cudnn as cudnn\r\nfrom torchvision import datasets, transforms\r\nfrom torch.autograd import Variable\r\nfrom torchvision import models\r\nfrom utils import * # *: utils 안의 모든 이름에 access\r\nfrom VGG_gru import VggNet\r\n# from tensorboardX import SummaryWriter\r\nimport argparse\r\nimport subprocess # 다른 언어로 만들어진 프로그램을 통합 ex) for 오래된 os.*\r\nfrom tqdm import tqdm\r\nfrom data_loader import FER, get_dataloader\r\n\r\ndef train(opt, epoch, model, optimizer, loss_function, train_loader):\r\n\r\n\tstart_time = time.time()\r\n\r\n\tfor param_group in optimizer.param_groups:\r\n\t\ttrain_learning_rate = float(param_group['lr'])\r\n\r\n\t# logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), train_learning_rate))\r\n\r\n\trunning_loss = 0.0\r\n\r\n\tmodel.train()\r\n\tprint('[*]Training...')\r\n\tprint('Epoch {}/{}'.format(epoch, opt.epochs))\r\n\tfor batch_idx, (data, label) in enumerate(tqdm(train_loader)):\r\n\r\n\r\n\t\tdata = data.squeeze(0)\r\n\t\tdata = Variable(data).to(opt.device)\r\n\r\n\t\tlabel = Variable(label.long()).to(opt.device)\r\n\t\tlabel = label.squeeze(1)\t\t\r\n\t\t# print('main.py label: ',label)\r\n\t\t# print(\"main.py size(label): \",label.shape)\r\n\t\t\r\n\t\toptimizer.zero_grad()\r\n\r\n\t\toutput = model(data)\r\n\t\t# print(\"main.py output : \",output)\r\n\t\t# print('main.py output.shape :', output.shape)\r\n\r\n\t\tloss = loss_function(output, label)\r\n\t\t# print('loss.item()',loss.item())\r\n\r\n\t\trunning_loss += loss.item()\r\n\t\tprint(' Training [{}/{}] {:.2f}sec.. loss : {:.6f}, running_loss : {:.6f}'.format(batch_idx,len(train_loader), time.time()-start_time, loss.item(), running_loss/(batch_idx+1)))\r\n\r\n\t\tloss.backward()\r\n\r\n\t\toptimizer.step()\r\n\t\r\n\tprint('==> total {:.2f}sec.. 
Loss:{:.6f}'\r\n\t\t\t\t.format(epoch, opt.epochs, time.time()-start_time, running_loss/(batch_idx+1)))\r\n\r\n\r\ndef valid(opt, epoch, model, valid_loader, metric):\r\n\r\n\tstart_time = time.time()\r\n\r\n\tmetric.reset()\r\n\r\n\tmodel.eval()\r\n\r\n\tprint('[*]Validation...')\r\n\tprint('Epoch {}/{}'.format(epoch, opt.epochs))\r\n\r\n\twith torch.no_grad():\r\n\t\tfor batch_idx, (data, label) in enumerate(tqdm(valid_loader)):\r\n\r\n\t\t\tBatch,T,C,H,W = data.size()\r\n\t\t\t\r\n\t\t\tdata = data.squeeze(0) \r\n\t\t\tdata = Variable(data).to(opt.device)\r\n\r\n\r\n\t\t\tlabel = Variable(label.long()).to(opt.device)\r\n\t\t\tlabel = label.squeeze(1)\r\n\r\n\t\t\toutput = []\r\n\t\t\tfor batch_index in range(Batch):\r\n\t\t\t\t# print('batch: ',Batch)\r\n\t\t\t\t# print('batch idx: ',batch_index)\r\n\t\t\t\t# print('@@ ',data[batch_index].shape)\r\n\t\t\t\toutput_feature = model(data[batch_index])\r\n\t\t\t\toutput.append(output_feature)\r\n\t\t\t# print('output: ',output)\r\n\r\n\t\t\toutput = torch.cat(output,0)\r\n\t\t\t# print('concat output :',output)\r\n\t\t\t\r\n\t\t\tmetric(output, label)\r\n\t\t\taccuracy,eval_loss = metric.value()\r\n\t\t\tavg_loss = eval_loss/((batch_idx+1)*opt.batch_size)\r\n\t\t\t\r\n\t\t\tprint('Validation [{}/{}] accuracy : {:.2f}, loss : {:.6f}'.format(batch_idx, len(valid_loader), accuracy, avg_loss))\r\n\t\t\tprint('acc & loss:',accuracy, '&', eval_loss)\r\n\r\n\tprint('[*]Validation...')\r\n\tprint('Epoch {}/{}'.format(epoch, opt.epochs))\r\n\r\n\treturn accuracy, avg_loss\t\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t# os.environ['CUDA_VISIBLE_DEVICES'] = \"1\"\r\n\r\n\tdata_dir = '../../../../data/face_data'\r\n\ttrain_dir = os.path.join(data_dir, 'train')\r\n\tcheckpoint_dir = os.path.join(data_dir, 'checkpoint')\r\n\ttest_dir = os.path.join(data_dir, 'val')\r\n\tresult_dir = os.path.join(data_dir, 'result')\r\n\r\n\tparser = argparse.ArgumentParser(description='PyTorch Facial Expression')\r\n\r\n\tparser.add_argument('--batch_size', type=int, default=16,\r\n\t\t\t\t\t\thelp='input batch size for valid/train = 1')\r\n\tparser.add_argument('--img_size', type=int, default=64)\r\n\tparser.add_argument('--epochs', type=int, default=200,\r\n\t\t\t\t\t\thelp='number of epochs to train (default: 200)')\r\n\tparser.add_argument('--start_epoch', default=1, type=int)\r\n\tparser.add_argument('--iter', type=int, default=0,\r\n\t\t\t\t\t\thelp='number of iters for each epoch, if iter is 0, it means number of images')\t\t\t\t\t\r\n\tparser.add_argument('--length', type=int, default=4,\r\n\t\t\t\t\t\thelp='data shape : (b, <<l>>, c, h, w) | meaning batch in training | for making each batch containing same class')\r\n\tparser.add_argument('--lr', type=float, default=1e-05)\r\n\tparser.add_argument('--num_workers', type=int, default=2)\r\n\tparser.add_argument('--optim', type=str, default='sgd', choices=['adam', 'sgd'])\r\n\r\n\t# parser.add_argument('--multi_gpu', default=False, action='store_true',\r\n\t#\t\t\t\t\thelp='Use Multi GPU')\r\n\t\r\n\tparser.add_argument('--no_multi_gpu', default=False, action='store_true',\r\n\t\t\t\t\t\thelp='Do Not Use Multi GPUs')\r\n\tparser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cpu')\r\n\r\n\tparser.add_argument('--data_dir', type=str, default=data_dir,\r\n\t\t\t\t\t\thelp='dataset path')\r\n\tparser.add_argument('--train_dir', type=str, default=train_dir)\r\n\tparser.add_argument('--checkpoint_dir', type=str, default=checkpoint_dir)\r\n\tparser.add_argument('--valid_dir', type=str, 
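help='validation dataset path',\r\n\t\t\t\t\t\t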
default=test_dir)\r\n\tparser.add_argument('--result_dir', type=str, default=result_dir)\r\n\r\n\tparser.add_argument('--mode', type=str, default='train', choices=['train', 'valid', 'test'])\r\n\tparser.add_argument('--resume', action='store_true', default=False)\r\n\tparser.add_argument('--resume_best', action='store_true', default=False)\r\n\r\n\topt = parser.parse_args()\r\n\r\n\t# torch.manual_seed(1)\r\n\ttorch.cuda.manual_seed(1)\r\n\ttorch.backends.cudnn.benchmark = False\r\n\r\n\tprint(opt)\r\n\r\n\t###here for same result#####\r\n\ttorch.backends.cudnn.enabled = False\r\n\ttorch.backends.cudnn.deterministic = False\r\n\r\n\tif torch.cuda.is_available():\r\n\t\tprint('Setting GPU')\r\n\t\tprint('===> CUDA Available: ', torch.cuda.is_available())\r\n\t\topt.device = 'cuda'\r\n\r\n\t\tif torch.cuda.device_count() > 1 and not opt.no_multi_gpu:\r\n\t\t\tprint('===> Use {} Multi GPUs'.format(torch.cuda.device_count()))\r\n\t\telse :\r\n\t\t\topt.no_multi_gpu = True\r\n\r\n\telse : \r\n\t\tprint('Using only CPU')\r\n\r\n\tprint('Initialize networks')\r\n\tmodel = VggNet()\r\n\tmodel = model.to(opt.device)\r\n\r\n\tprint(\"Setting Optimizer & loss\")\r\n\tif opt.optim == 'sgd':\r\n\t\toptimizer = optim.SGD(model.parameters(), lr=opt.lr , momentum=0.9, weight_decay= 0.00005)\r\n\telse :\r\n\t\toptimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9,0.999), eps=1e-8, weight_decay=0.00005)\r\n\t\r\n\tloss_function = nn.CrossEntropyLoss()\r\n\r\n\tif opt.resume or opt.resume_best:\r\n\t\topt.start_epoch, model, optimizer = load_model(opt, model, optimizer=optimizer)\r\n\t\r\n\tif not opt.no_multi_gpu:\r\n\t\tmodel = nn.DataParallel(model)\r\n\r\n\ttrain_data_loader = get_dataloader(opt,'train')\r\n\tvalid_data_loader = get_dataloader(opt,'valid')\r\n\r\n\tmetric = AccumulatedAccuracyMetric()\r\n\tpre_valid_loss = float('inf')\r\n\r\n\tfor epoch in range(opt.start_epoch, opt.epochs+1): \r\n\t\topt.mode = 'train'\r\n\t\ttrain(opt, epoch, model, optimizer, loss_function, train_data_loader)\r\n\r\n\t\topt.mode = 'valid'\r\n\t\tvalid_acc, valid_loss = valid(opt, epoch, model, valid_data_loader, metric)\r\n\r\n\t\tif pre_valid_loss > valid_loss:\r\n\t\t\tpre_valid_loss = valid_loss\r\n\t\t\tsave_checkpoint(opt, model, optimizer, epoch, valid_loss, valid_acc)\r\n\t\t# eval_accuracy,eval_loss = eval(epoch,metric)"
] | [
[
"torch.LongTensor",
"numpy.resize",
"numpy.expand_dims",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.zeros",
"numpy.random.randint"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.manual_seed",
"torch.cat",
"torch.cuda.device_count",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pinsleepe/great_expectations | [
"37329c906a5a159b54257dbcd897850177eecbcc"
] | [
"tests/execution_engine/test_sparkdf_execution_engine.py"
] | [
"import datetime\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch_spec import (\n AzureBatchSpec,\n GCSBatchSpec,\n PathBatchSpec,\n RuntimeDataBatchSpec,\n S3BatchSpec,\n)\nfrom great_expectations.execution_engine import SparkDFExecutionEngine\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.row_conditions import (\n RowCondition,\n RowConditionParserType,\n)\nfrom great_expectations.self_check.util import build_spark_engine\nfrom great_expectations.validator.metric_configuration import MetricConfiguration\nfrom tests.expectations.test_util import get_table_columns_metric\nfrom tests.test_utils import create_files_in_directory\n\ntry:\n pyspark = pytest.importorskip(\"pyspark\")\n # noinspection PyPep8Naming\n import pyspark.sql.functions as F\n from pyspark.sql.types import IntegerType, LongType, Row, StringType\nexcept ImportError:\n pyspark = None\n F = None\n IntegerType = None\n LongType = None\n StringType = None\n Row = None\n\n\[email protected]\ndef test_sparkdf(spark_session):\n def generate_ascending_list_of_datetimes(\n n, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)\n ):\n start_time = datetime.datetime(\n start_date.year, start_date.month, start_date.day\n )\n seconds_between_dates = (end_date - start_date).total_seconds()\n # noinspection PyUnusedLocal\n datetime_list = [\n start_time\n + datetime.timedelta(seconds=random.randrange(int(seconds_between_dates)))\n for i in range(n)\n ]\n datetime_list.sort()\n return datetime_list\n\n k = 120\n random.seed(1)\n timestamp_list = generate_ascending_list_of_datetimes(\n n=k, end_date=datetime.date(2020, 1, 31)\n )\n date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list]\n\n # noinspection PyUnusedLocal\n batch_ids = [random.randint(0, 10) for i in range(k)]\n batch_ids.sort()\n # noinspection PyUnusedLocal\n session_ids = [random.randint(2, 60) for i in range(k)]\n session_ids = [i - random.randint(0, 2) for i in session_ids]\n session_ids.sort()\n\n # noinspection PyUnusedLocal\n spark_df = spark_session.createDataFrame(\n data=pd.DataFrame(\n {\n \"id\": range(k),\n \"batch_id\": batch_ids,\n \"date\": date_list,\n \"y\": [d.year for d in date_list],\n \"m\": [d.month for d in date_list],\n \"d\": [d.day for d in date_list],\n \"timestamp\": timestamp_list,\n \"session_ids\": session_ids,\n \"event_type\": [\n random.choice([\"start\", \"stop\", \"continue\"]) for i in range(k)\n ],\n \"favorite_color\": [\n \"#\"\n + \"\".join(\n [random.choice(list(\"0123456789ABCDEF\")) for j in range(6)]\n )\n for i in range(k)\n ],\n }\n )\n )\n spark_df = spark_df.withColumn(\n \"timestamp\", F.col(\"timestamp\").cast(IntegerType()).cast(StringType())\n )\n return spark_df\n\n\[email protected]\ndef spark_df_from_pandas_df():\n \"\"\"\n Construct a spark dataframe from pandas dataframe.\n Returns:\n Function that can be used in your test e.g.:\n spark_df = spark_df_from_pandas_df(spark_session, pandas_df)\n \"\"\"\n\n def _construct_spark_df_from_pandas(\n spark_session,\n pandas_df,\n ):\n\n spark_df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pandas_df.to_records(index=False)\n ],\n pandas_df.columns.tolist(),\n )\n return spark_df\n\n return 
_construct_spark_df_from_pandas\n\n\ndef test_reader_fn(spark_session, basic_spark_df_execution_engine):\n engine = basic_spark_df_execution_engine\n # Testing that can recognize basic csv file\n fn = engine._get_reader_fn(reader=spark_session.read, path=\"myfile.csv\")\n assert \"<bound method DataFrameReader.csv\" in str(fn)\n\n # Ensuring that other way around works as well - reader_method should always override path\n fn_new = engine._get_reader_fn(reader=spark_session.read, reader_method=\"csv\")\n assert \"<bound method DataFrameReader.csv\" in str(fn_new)\n\n\ndef test_reader_fn_parameters(\n spark_session, basic_spark_df_execution_engine, tmp_path_factory\n):\n base_directory = str(tmp_path_factory.mktemp(\"test_csv\"))\n create_files_in_directory(\n directory=base_directory,\n file_name_list=[\n \"test-A.csv\",\n ],\n )\n test_df_small_csv_path = base_directory + \"/test-A.csv\"\n engine = basic_spark_df_execution_engine\n fn = engine._get_reader_fn(reader=spark_session.read, path=test_df_small_csv_path)\n assert \"<bound method DataFrameReader.csv\" in str(fn)\n\n test_sparkdf_with_header_param = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=test_df_small_csv_path,\n data_asset_name=\"DATA_ASSET\",\n reader_options={\"header\": True},\n )\n ).dataframe\n assert test_sparkdf_with_header_param.head() == Row(x=\"1\", y=\"2\")\n\n test_sparkdf_with_no_header_param = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(path=test_df_small_csv_path, data_asset_name=\"DATA_ASSET\")\n ).dataframe\n assert test_sparkdf_with_no_header_param.head() == Row(_c0=\"x\", _c1=\"y\")\n\n\ndef test_get_domain_records_with_column_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"b\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:3]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_column_domain_and_filter_conditions(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"b\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n )\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:3]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_different_column_domain_and_filter_conditions(\n 
spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"a\")<2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n )\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:1]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_different_column_domain_and_multiple_filter_conditions(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [2, 3, 4, 5, None], \"c\": [1, 2, 3, 4, None]}\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"a\",\n \"row_condition\": 'col(\"a\")<10',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"filter_conditions\": [\n RowCondition(\n condition=\"b IS NOT NULL\",\n condition_type=RowConditionParserType.SPARK_SQL,\n ),\n RowCondition(\n condition=\"NOT isnan(b)\",\n condition_type=RowConditionParserType.SPARK_SQL,\n ),\n ],\n }\n )\n\n expected_column_pd_df = pd_df.iloc[:4]\n expected_column_df = spark_df_from_pandas_df(spark_session, expected_column_pd_df)\n\n assert dataframes_equal(\n data, expected_column_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_column_pair_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"both_values_are_missing\",\n }\n )\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4, 6], \"b\": [3.0, 4.0, 5.0, 6.0], \"c\": [2.0, 3.0, 4.0, None]}\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": 
\"great_expectations__experimental__\",\n \"ignore_row_if\": \"either_value_is_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4], \"b\": [3, 4, 5], \"c\": [2, 3, 4]}\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_A\": \"b\",\n \"column_B\": \"c\",\n \"row_condition\": 'col(\"a\")<6',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"neither\",\n }\n )\n\n expected_column_pair_pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5],\n \"b\": [2.0, 3.0, 4.0, 5.0, None],\n \"c\": [1.0, 2.0, 3.0, 4.0, 5.0],\n }\n )\n expected_column_pair_df = spark_df_from_pandas_df(\n spark_session, expected_column_pair_pd_df\n )\n\n assert dataframes_equal(\n data, expected_column_pair_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_domain_records_with_multicolumn_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"a\", \"c\"],\n \"row_condition\": 'col(\"b\")>2',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"all_values_are_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\"a\": [2, 3, 4, 5], \"b\": [3, 4, 5, 7], \"c\": [2, 3, 4, 6]}, index=[0, 1, 2, 4]\n )\n expected_multicolumn_df = spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=expected_multicolumn_df)\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6],\n \"b\": [2, 3, 4, 5, None, 6],\n \"c\": [1, 2, 3, 4, 5, None],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"b\", \"c\"],\n \"row_condition\": 'col(\"a\")<5',\n \"condition_parser\": \"great_expectations__experimental__\",\n \"ignore_row_if\": \"any_value_is_missing\",\n }\n )\n for column_name in data.columns:\n data = data.withColumn(column_name, data[column_name].cast(LongType()))\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, 5], \"c\": [1, 2, 3, 4]}, index=[0, 1, 2, 3]\n )\n\n expected_multicolumn_df = 
spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n }\n )\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data = engine.get_domain_records(\n domain_kwargs={\n \"column_list\": [\"b\", \"c\"],\n \"ignore_row_if\": \"never\",\n }\n )\n\n expected_multicolumn_pd_df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, 5],\n \"b\": [2, 3, 4, 5, 6, 7],\n \"c\": [1, 2, 3, 4, None, 6],\n },\n index=[0, 1, 2, 3, 4, 5],\n )\n\n expected_multicolumn_df = spark_df_from_pandas_df(\n spark_session, expected_multicolumn_pd_df\n )\n\n assert dataframes_equal(\n data, expected_multicolumn_df\n ), \"Data does not match after getting full access compute domain\"\n\n\ndef test_get_compute_domain_with_no_domain_kwargs(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={}, domain_type=MetricDomainTypes.TABLE\n )\n assert compute_kwargs is not None, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {}\n assert data.schema == df.schema\n assert data.collect() == df.collect()\n\n\ndef test_get_compute_domain_with_column_domain(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column\": \"a\"}, domain_type=MetricDomainTypes.COLUMN\n )\n assert compute_kwargs is not None, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {\"column\": \"a\"}\n assert data.schema == df.schema\n assert data.collect() == df.collect()\n\n\ndef test_get_compute_domain_with_row_condition(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n expected_df = df.filter(F.col(\"b\") > 2)\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"row_condition\": \"b > 2\", \"condition_parser\": \"spark\"},\n domain_type=MetricDomainTypes.TABLE,\n )\n # Ensuring data has been properly queried\n assert data.schema == expected_df.schema\n assert data.collect() == expected_df.collect()\n\n # Ensuring compute kwargs have not been modified\n assert (\n \"row_condition\" in compute_kwargs.keys()\n ), \"Row condition should be located within compute kwargs\"\n assert accessor_kwargs == {}\n\n\n# What happens when we filter such that no value meets the condition?\ndef test_get_compute_domain_with_unmeetable_row_condition(\n spark_session, basic_spark_df_execution_engine, 
spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n expected_df = df.filter(F.col(\"b\") > 24)\n\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"row_condition\": \"b > 24\", \"condition_parser\": \"spark\"},\n domain_type=MetricDomainTypes.TABLE,\n )\n # Ensuring data has been properly queried\n assert data.schema == expected_df.schema\n assert data.collect() == expected_df.collect()\n\n # Ensuring compute kwargs have not been modified\n assert \"row_condition\" in compute_kwargs.keys()\n assert accessor_kwargs == {}\n\n\ndef test_basic_setup(\n spark_session, basic_spark_df_execution_engine, spark_df_from_pandas_df\n):\n pd_df = pd.DataFrame({\"x\": range(10)})\n df = spark_df_from_pandas_df(spark_session, pd_df)\n batch_data = basic_spark_df_execution_engine.get_batch_data(\n batch_spec=RuntimeDataBatchSpec(\n batch_data=df,\n data_asset_name=\"DATA_ASSET\",\n )\n ).dataframe\n assert batch_data is not None\n\n\ndef test_get_batch_data(test_sparkdf, basic_spark_df_execution_engine):\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(batch_data=test_sparkdf, data_asset_name=\"DATA_ASSET\")\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n\n\ndef test_get_batch_empty_splitter(\n test_folder_connection_path_csv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n # reader_options are needed to specify the fact that the first line of test file is the header\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_csv, \"test.csv\"),\n reader_options={\"header\": True},\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_empty_splitter_tsv(\n test_folder_connection_path_tsv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n # reader_options are needed to specify the fact that the first line of test file is the header\n # reader_options are also needed to specify the separator (otherwise, comma will be used as the default separator)\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_tsv, \"test.tsv\"),\n reader_options={\"header\": True, \"sep\": \"\\t\"},\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_empty_splitter_parquet(\n test_folder_connection_path_parquet, basic_spark_df_execution_engine\n):\n # Note: reader method and reader_options are not needed, because\n # SparkDFExecutionEngine automatically determines the file type as well as the schema of the Parquet file.\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_parquet, \"test.parquet\"),\n splitter_method=None,\n )\n ).dataframe\n assert test_sparkdf.count() == 5\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_runtime(\n test_sparkdf, basic_spark_df_execution_engine\n):\n test_sparkdf = 
basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf, splitter_method=\"_split_on_whole_table\"\n )\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n\n\ndef test_get_batch_with_split_on_whole_table_filesystem(\n test_folder_connection_path_csv, basic_spark_df_execution_engine\n):\n # reader_method not configured because spark will configure own reader by default\n test_sparkdf = basic_spark_df_execution_engine.get_batch_data(\n PathBatchSpec(\n path=os.path.join(test_folder_connection_path_csv, \"test.csv\"),\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 6\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_s3(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n S3BatchSpec(\n path=\"s3://bucket/test/test.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_azure(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n AzureBatchSpec(\n path=\"wasbs://test_container@test_account.blob.core.windows.net/test_dir/test_file.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_whole_table_gcs(\n spark_session, basic_spark_df_execution_engine\n):\n # noinspection PyUnusedLocal\n def mocked_get_reader_function(*args, **kwargs):\n # noinspection PyUnusedLocal,PyShadowingNames\n def mocked_reader_function(*args, **kwargs):\n pd_df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in pd_df.to_records(index=False)\n ],\n pd_df.columns.tolist(),\n )\n return df\n\n return mocked_reader_function\n\n spark_engine = basic_spark_df_execution_engine\n 
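# Swap in the mocked reader factory so get_batch_data() builds its\n    # DataFrame from the in-memory pandas frame instead of reading from GCS.\n    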
spark_engine._get_reader_fn = mocked_get_reader_function\n\n test_sparkdf = spark_engine.get_batch_data(\n GCSBatchSpec(\n path=\"gcs://bucket/test/test.csv\",\n reader_method=\"csv\",\n reader_options={\"header\": True},\n splitter_method=\"_split_on_whole_table\",\n )\n ).dataframe\n assert test_sparkdf.count() == 4\n assert len(test_sparkdf.columns) == 2\n\n\ndef test_get_batch_with_split_on_column_value(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_column_value\",\n splitter_kwargs={\n \"column_name\": \"batch_id\",\n \"batch_identifiers\": {\"batch_id\": 2},\n },\n )\n ).dataframe\n assert test_sparkdf.count() == 120\n assert len(test_sparkdf.columns) == 10\n collected = split_df.collect()\n for val in collected:\n assert val.batch_id == 2\n\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_column_value\",\n splitter_kwargs={\n \"column_name\": \"date\",\n \"batch_identifiers\": {\"date\": datetime.date(2020, 1, 30)},\n },\n )\n ).dataframe\n assert split_df.count() == 3\n assert len(split_df.columns) == 10\n\n\ndef test_get_batch_with_split_on_converted_datetime(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_converted_datetime\",\n splitter_kwargs={\n \"column_name\": \"timestamp\",\n \"batch_identifiers\": {\"timestamp\": \"2020-01-03\"},\n },\n )\n ).dataframe\n assert split_df.count() == 2\n assert len(split_df.columns) == 10\n\n\ndef test_get_batch_with_split_on_divided_integer(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_divided_integer\",\n splitter_kwargs={\n \"column_name\": \"id\",\n \"divisor\": 10,\n \"batch_identifiers\": {\"id\": 5},\n },\n )\n ).dataframe\n assert split_df.count() == 10\n assert len(split_df.columns) == 10\n max_result = split_df.select([F.max(\"id\")])\n assert max_result.collect()[0][\"max(id)\"] == 59\n min_result = split_df.select([F.min(\"id\")])\n assert min_result.collect()[0][\"min(id)\"] == 50\n\n\ndef test_get_batch_with_split_on_mod_integer(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_mod_integer\",\n splitter_kwargs={\n \"column_name\": \"id\",\n \"mod\": 10,\n \"batch_identifiers\": {\"id\": 5},\n },\n )\n ).dataframe\n\n assert split_df.count() == 12\n assert len(split_df.columns) == 10\n max_result = split_df.select([F.max(\"id\")])\n assert max_result.collect()[0][\"max(id)\"] == 115\n min_result = split_df.select([F.min(\"id\")])\n assert min_result.collect()[0][\"min(id)\"] == 5\n\n\ndef test_get_batch_with_split_on_multi_column_values(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"y\", \"m\", \"d\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n )\n ).dataframe\n assert split_df.count() == 4\n assert 
len(split_df.columns) == 10\n collected = split_df.collect()\n for val in collected:\n assert val.date == datetime.date(2020, 1, 5)\n\n with pytest.raises(ValueError):\n # noinspection PyUnusedLocal\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"I\", \"dont\", \"exist\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n )\n ).dataframe\n\n\ndef test_get_batch_with_split_on_hashed_column_incorrect_hash_function_name(\n test_sparkdf,\n basic_spark_df_execution_engine,\n):\n with pytest.raises(ge_exceptions.ExecutionEngineError):\n # noinspection PyUnusedLocal\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_hashed_column\",\n splitter_kwargs={\n \"column_name\": \"favorite_color\",\n \"hash_digits\": 1,\n \"hash_function_name\": \"I_wont_work\",\n \"batch_identifiers\": {\n \"hash_value\": \"a\",\n },\n },\n )\n ).dataframe\n\n\ndef test_get_batch_with_split_on_hashed_column(\n test_sparkdf, basic_spark_df_execution_engine\n):\n split_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_hashed_column\",\n splitter_kwargs={\n \"column_name\": \"favorite_color\",\n \"hash_digits\": 1,\n \"hash_function_name\": \"sha256\",\n \"batch_identifiers\": {\n \"hash_value\": \"a\",\n },\n },\n )\n ).dataframe\n assert split_df.count() == 8\n assert len(split_df.columns) == 10\n\n\n# ### Sampling methods ###\ndef test_get_batch_empty_sampler(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(batch_data=test_sparkdf, sampling_method=None)\n ).dataframe\n assert sampled_df.count() == 120\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_random(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf, sampling_method=\"_sample_using_random\"\n )\n ).dataframe\n # The test dataframe contains 10 columns and 120 rows.\n assert len(sampled_df.columns) == 10\n assert 0 <= sampled_df.count() <= 120\n # The sampling probability \"p\" used in \"SparkDFExecutionEngine._sample_using_random()\" is 0.1 (the equivalent of an\n # unfair coin with the 10% chance of coming up as \"heads\"). 
Hence, we should never get as much as 20% of the rows.\n assert sampled_df.count() < 25\n\n\ndef test_sample_using_mod(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_mod\",\n sampling_kwargs={\n \"column_name\": \"id\",\n \"mod\": 5,\n \"value\": 4,\n },\n )\n ).dataframe\n assert sampled_df.count() == 24\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_a_list(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_a_list\",\n sampling_kwargs={\n \"column_name\": \"id\",\n \"value_list\": [3, 5, 7, 11],\n },\n )\n ).dataframe\n assert sampled_df.count() == 4\n assert len(sampled_df.columns) == 10\n\n\ndef test_sample_using_md5_wrong_hash_function_name(\n test_sparkdf, basic_spark_df_execution_engine\n):\n with pytest.raises(ge_exceptions.ExecutionEngineError):\n # noinspection PyUnusedLocal\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_hash\",\n sampling_kwargs={\n \"column_name\": \"date\",\n \"hash_function_name\": \"I_wont_work\",\n },\n )\n ).dataframe\n\n\ndef test_sample_using_md5(test_sparkdf, basic_spark_df_execution_engine):\n sampled_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n sampling_method=\"_sample_using_hash\",\n sampling_kwargs={\n \"column_name\": \"date\",\n \"hash_function_name\": \"md5\",\n },\n )\n ).dataframe\n assert sampled_df.count() == 10\n assert len(sampled_df.columns) == 10\n\n collected = sampled_df.collect()\n for val in collected:\n assert val.date in [datetime.date(2020, 1, 15), datetime.date(2020, 1, 29)]\n\n\ndef test_split_on_multi_column_values_and_sample_using_random(\n test_sparkdf, basic_spark_df_execution_engine\n):\n returned_df = basic_spark_df_execution_engine.get_batch_data(\n RuntimeDataBatchSpec(\n batch_data=test_sparkdf,\n splitter_method=\"_split_on_multi_column_values\",\n splitter_kwargs={\n \"column_names\": [\"y\", \"m\", \"d\"],\n \"batch_identifiers\": {\n \"y\": 2020,\n \"m\": 1,\n \"d\": 5,\n },\n },\n sampling_method=\"_sample_using_random\",\n sampling_kwargs={\n \"p\": 0.5,\n },\n )\n ).dataframe\n\n # The test dataframe contains 10 columns and 120 rows.\n assert len(returned_df.columns) == 10\n # The number of returned rows corresponding to the value of \"batch_identifiers\" above is 4.\n assert 0 <= returned_df.count() <= 4\n # The sampling probability \"p\" used in \"SparkDFExecutionEngine._sample_using_random()\" is 0.5 (the equivalent of a\n # fair coin with the 50% chance of coming up as \"heads\"). Hence, on average we should get 50% of the rows, which is\n # 2; however, for such a small sample (of 4 rows), the number of rows returned by an individual run can deviate from\n # this average. Still, in the majority of trials, the number of rows should not be fewer than 2 or greater than 3.\n # The assertion in the next line, supporting this reasoning, is commented out to insure zero failures. 
Developers\n # are encouraged to uncomment it, whenever the \"_sample_using_random\" feature is the main focus of a given effort.\n # assert 2 <= returned_df.count() <= 3\n\n for val in returned_df.collect():\n assert val.date == datetime.date(2020, 1, 5)\n\n\ndef test_add_column_row_condition(spark_session, basic_spark_df_execution_engine):\n df = pd.DataFrame({\"foo\": [1, 2, 3, 3, None, 2, 3, 4, 5, 6]})\n df = spark_session.createDataFrame(\n [\n tuple(\n None if isinstance(x, (float, int)) and np.isnan(x) else x\n for x in record.tolist()\n )\n for record in df.to_records(index=False)\n ],\n df.columns.tolist(),\n )\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n domain_kwargs = {\"column\": \"foo\"}\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=False\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=False, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (None,), (2,), (3,), (4,), (5,), (6,)]\n\n # This time, our skip value *will* be nan\n df = pd.DataFrame({\"foo\": [1, 2, 3, 3, None, 2, 3, 4, 5, 6]})\n df = spark_session.createDataFrame(df)\n engine = basic_spark_df_execution_engine\n engine.load_batch_data(batch_id=\"1234\", batch_data=df)\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=False, filter_nan=True\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"NOT isnan(foo)\", condition_type=RowConditionParserType.SPARK_SQL\n )\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n assert res == [(1,), (2,), (3,), (3,), (2,), (3,), (4,), (5,), (6,)]\n\n new_domain_kwargs = engine.add_column_row_condition(\n domain_kwargs, filter_null=True, filter_nan=False\n )\n assert new_domain_kwargs[\"filter_conditions\"] == [\n RowCondition(\n condition=\"foo IS NOT NULL\", condition_type=RowConditionParserType.SPARK_SQL\n ),\n ]\n df, cd, ad = engine.get_compute_domain(new_domain_kwargs, domain_type=\"table\")\n res = df.collect()\n expected = [(1,), (2,), (3,), (3,), (np.nan,), (2,), (3,), (4,), (5,), (6,)]\n # since nan != nan by default\n assert np.allclose(res, expected, rtol=0, atol=0, equal_nan=True)\n\n\n# Function to test for spark dataframe equality\ndef 
dataframes_equal(first_table, second_table):\n if first_table.schema != second_table.schema:\n return False\n if first_table.collect() != second_table.collect():\n return False\n return True\n\n\n# Ensuring that, given aggregate metrics, they can be properly bundled together\ndef test_sparkdf_batch_aggregate_metrics(caplog, spark_session):\n import datetime\n\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]},\n ),\n batch_id=\"1234\",\n )\n\n metrics: dict = {}\n\n table_columns_metric: MetricConfiguration\n results: dict\n\n table_columns_metric, results = get_table_columns_metric(engine=engine)\n\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min.aggregate_fn\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"table.columns\": table_columns_metric,\n },\n )\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_1,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_2,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_3,\n \"table.columns\": table_columns_metric,\n },\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n metric_dependencies={\n \"metric_partial_fn\": desired_metric_4,\n \"table.columns\": table_columns_metric,\n },\n )\n start = datetime.datetime.now()\n caplog.clear()\n caplog.set_level(logging.DEBUG, logger=\"great_expectations\")\n results = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n ),\n metrics=metrics,\n )\n metrics.update(results)\n end = datetime.datetime.now()\n print(end - start)\n assert metrics[desired_metric_1.id] == 3\n assert metrics[desired_metric_2.id] == 1\n assert metrics[desired_metric_3.id] == 4\n assert metrics[desired_metric_4.id] == 4\n\n # Check that all four of these metrics were computed on a single domain\n found_message = False\n 
for record in caplog.records:\n if (\n record.message\n == \"SparkDFExecutionEngine computed 4 metrics on domain_id ()\"\n ):\n found_message = True\n assert found_message\n\n\n# Ensuring functionality of compute_domain when no domain kwargs are given\ndef test_get_compute_domain_with_no_domain_kwargs_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={}, domain_type=\"table\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {}, \"Accessor kwargs have been modified\"\n\n\n# Testing for only untested use case - multicolumn\ndef test_get_compute_domain_with_column_pair(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column_A\": \"a\", \"column_B\": \"b\"}, domain_type=\"column_pair\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be existent\"\n assert accessor_kwargs == {\n \"column_A\": \"a\",\n \"column_B\": \"b\",\n }, \"Accessor kwargs have been modified\"\n\n\n# Testing for only untested use case - multicolumn\ndef test_get_compute_domain_with_multicolumn(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None], \"c\": [1, 2, 3, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column_list\": [\"a\", \"b\", \"c\"]}, domain_type=\"multicolumn\"\n )\n\n # Ensuring that with no domain nothing happens to the data itself\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be empty\"\n assert accessor_kwargs == {\n \"column_list\": [\"a\", \"b\", \"c\"]\n }, \"Accessor kwargs have been modified\"\n\n\n# Testing whether compute domain is properly calculated, but this time obtaining a column\ndef test_get_compute_domain_with_column_domain_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\"column\": \"a\"}, domain_type=\"column\"\n )\n\n # Ensuring that column domain is now an accessor kwarg, and data remains unmodified\n assert dataframes_equal(\n data, df\n ), \"Data does not match after getting compute domain\"\n assert compute_kwargs == {}, \"Compute domain kwargs should be empty\"\n assert accessor_kwargs == {\"column\": \"a\"}, \"Accessor kwargs have been modified\"\n\n\n# Using an unmeetable row condition to see if empty 
dataset will result in errors\ndef test_get_domain_records_with_row_condition_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n expected_df = df.where(\"b > 2\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 2\",\n \"condition_parser\": \"spark\",\n }\n )\n\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n\n# What happens when we filter such that no value meets the condition?\ndef test_get_domain_records_with_unmeetable_row_condition_alt(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n expected_df = df.where(\"b > 24\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n }\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n # Ensuring errors for column and column_ pair domains are caught\n with pytest.raises(ge_exceptions.GreatExpectationsError):\n # noinspection PyUnusedLocal\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n },\n domain_type=\"column\",\n )\n with pytest.raises(ge_exceptions.GreatExpectationsError) as g:\n # noinspection PyUnusedLocal\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"spark\",\n },\n domain_type=\"column_pair\",\n )\n\n\n# Testing to ensure that great expectation experimental parser also works in terms of defining a compute domain\ndef test_get_compute_domain_with_ge_experimental_condition_parser(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Filtering expected data based on row condition\n expected_df = df.where(\"b == 2\")\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n # Obtaining data from computation\n data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(\n domain_kwargs={\n \"column\": \"b\",\n \"row_condition\": 'col(\"b\") == 2',\n \"condition_parser\": \"great_expectations__experimental__\",\n },\n domain_type=\"column\",\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n # Ensuring compute kwargs have not been modified\n assert (\n \"row_condition\" in compute_kwargs.keys()\n ), \"Row condition should be located within compute kwargs\"\n assert accessor_kwargs == {\"column\": \"b\"}, \"Accessor kwargs have been modified\"\n\n # Should react same for get_domain_records()\n data = engine.get_domain_records(\n domain_kwargs={\n \"column\": \"b\",\n \"row_condition\": 'col(\"b\") == 2',\n \"condition_parser\": 
\"great_expectations__experimental__\",\n }\n )\n # Ensuring data has been properly queried\n assert dataframes_equal(\n data, expected_df\n ), \"Data does not match after getting compute domain\"\n\n\ndef test_get_compute_domain_with_nonexistent_condition_parser(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, None]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Loading batch data\n engine.load_batch_data(batch_data=df, batch_id=\"1234\")\n\n # Expect GreatExpectationsError because parser doesn't exist\n with pytest.raises(ge_exceptions.GreatExpectationsError):\n # noinspection PyUnusedLocal\n data = engine.get_domain_records(\n domain_kwargs={\n \"row_condition\": \"b > 24\",\n \"condition_parser\": \"nonexistent\",\n },\n )\n\n\n# Ensuring that we can properly inform user when metric doesn't exist - should get a metric provider error\ndef test_resolve_metric_bundle_with_nonexistent_metric(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 2, 1, 2, 3, 3], \"b\": [4, 4, 4, 4, 4, 4]},\n ),\n batch_id=\"1234\",\n )\n\n desired_metric_1 = MetricConfiguration(\n metric_name=\"column_values.unique\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n desired_metric_2 = MetricConfiguration(\n metric_name=\"column.min\",\n metric_domain_kwargs={\"column\": \"a\"},\n metric_value_kwargs=None,\n )\n desired_metric_3 = MetricConfiguration(\n metric_name=\"column.max\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n )\n desired_metric_4 = MetricConfiguration(\n metric_name=\"column.does_not_exist\",\n metric_domain_kwargs={\"column\": \"b\"},\n metric_value_kwargs=None,\n )\n\n # Ensuring a metric provider error is raised if metric does not exist\n with pytest.raises(ge_exceptions.MetricProviderError) as e:\n # noinspection PyUnusedLocal\n res = engine.resolve_metrics(\n metrics_to_resolve=(\n desired_metric_1,\n desired_metric_2,\n desired_metric_3,\n desired_metric_4,\n )\n )\n print(e)\n\n\n# Making sure dataframe property is functional\ndef test_dataframe_property_given_loaded_batch(spark_session):\n engine: SparkDFExecutionEngine = build_spark_engine(\n spark=spark_session,\n df=pd.DataFrame(\n {\"a\": [1, 5, 22, 3, 5, 10]},\n ),\n batch_id=\"1234\",\n )\n df = engine.dataframe\n\n # Ensuring Data not distorted\n assert engine.dataframe == df\n"
] | [
[
"numpy.isnan",
"numpy.allclose",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
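The row above is Great Expectations' Spark execution-engine test suite, which exercises splitter and sampler methods such as `_split_on_mod_integer`, `_split_on_divided_integer`, and `_sample_using_mod`. As a hedged illustration of what those splitters reduce to, here is a minimal plain-PySpark sketch; it does not use the Great Expectations API, only the equivalent DataFrame filters, and the 120-row frame mirrors the size of the `test_sparkdf` fixture assumed by the tests.

```python
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.master("local[1]").appName("splitter-sketch").getOrCreate()

# 120-row frame with an integer "id" column, analogous to the test fixture
df = spark.range(120)

# _split_on_mod_integer equivalent: keep rows where id % 10 == 5
split_df = df.filter(F.col("id") % 10 == 5)
assert split_df.count() == 12           # 5, 15, ..., 115 (matches the test's 12 rows)

# _split_on_divided_integer equivalent: keep rows where id // 10 == 5
divided_df = df.filter((F.col("id") / 10).cast("int") == 5)
assert divided_df.count() == 10         # 50..59 (matches the test's min 50 / max 59)

# _sample_using_mod equivalent: id % 5 == 4 keeps 24 of 120 rows
sampled_df = df.filter(F.col("id") % 5 == 4)
assert sampled_df.count() == 24

spark.stop()
```

The counts line up with the assertions in `test_get_batch_with_split_on_mod_integer`, `test_get_batch_with_split_on_divided_integer`, and `test_sample_using_mod` above.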
jnefoussi/pytechfin | [
"4d5bc44410b7161ab3acd65b2474468a84e576af"
] | [
"pytechfin/carol_techfin.py"
] | [
"from collections import defaultdict\nimport pandas as pd\n\n# TODO: Add custom pipeline function from \n# https://github.com/rafarui/techfin-reprocess/blob/master/functions/custom_pipeline.py\n\n# TODO: Add track_tasks function from\n# https://github.com/rafarui/techfin-reprocess/blob/master/functions/carol_task.py\n\nclass CarolTechfin:\n \"\"\" Module to handle Carol's data.\n Needed add in Carol Module\n \"\"\"\n\n def __init__(self, carol):\n self.carol = carol\n\n\n def get_staging_data(self, staging_name, \n connector_name='protheus_carol', merge_records=True, columns=None, callback=None, max_workers=30):\n \"\"\" Get records from a staging table.\n\n Args:\n staging_name: `str`,\n Staging name to fetch parquet of\n merge_records: `bool`, default `True`\n This will keep only the most recent record exported. Sometimes there are updates and/or deletions and\n one should keep only the last records.\n columns: `list`, default `None`\n List of columns to fetch.\n callback: `callable`, default `None`\n Function to be called each downloaded file.\n max_workers: `int` default `30`\n Number of workers to use when downloading parquet files with pandas back-end.\n\n Returns: `pandas.DataFrame`\n DataFrame with the staging data.\n\n \"\"\"\n\n # number of workers to download in parallel\n max_workers=max_workers\n\n # if you want to download a few columns, [\"COLUMNS\", \"TO\", \"FETCH\"]\n col=columns\n\n # maximum records to fetch. P.S.: only works if `max_workers=None`\n max_hits=None \n\n # if metadata should be returned (mdmId, mdmLastUpdated, etc)\n return_metadata = True\n\n # if records with duplicated ids should be consolidated by pyCarol\n merge_records = merge_records\n\n #connector + staging table\n connector_name=connector_name\n staging = staging_name\n\n # file_pattern = '2021-02'\n file_pattern = None\n\n df = self.carol.staging.fetch_parquet(\n staging_name=staging, \n connector_name=connector_name, \n max_workers=max_workers, \n columns=col, \n merge_records=merge_records, \n return_metadata=return_metadata, \n max_hits=max_hits,\n callback=callback, file_pattern=file_pattern)\n\n return df\n\n\n def get_realtime_data(self, datamodel_name):\n \"\"\" Get records from a realtime datamodel\n\n Args:\n datamodel_name: ``str`\n Carol datamodel name\n\n Returns: `pandas.DataFrame`\n DataFrame with the realtime data.\n \"\"\"\n\n filter = {\n \"mustList\": [\n {\n \"mdmFilterType\": \"TYPE_FILTER\",\n \"mdmValue\": datamodel_name+\"Golden\" \n }\n ,\n {\n \"mdmFilterType\": \"TERM_FILTER\",\n \"mdmKey\":\"mdmMergePending\",\n \"mdmValue\": \"false\"\n },\n {\n \"mdmFilterType\": \"RANGE_FILTER\",\n \"mdmKey\": \"mdmCounterForEntity\",\n \"mdmValue\": [0,'null'],\n \"mdmValuesQuery\": {}\n }\n ]\n }\n\n result = self.carol.query(only_hits=True, page_size=1000, print_status=True).query(filter).go().results\n realtime = pd.DataFrame(result)\n\n return realtime\n\n def get_cds_data(self, datamodel_name, merge_records=True, columns = None, return_metadata = False, callback=None, max_workers=30):\n \"\"\"[summary]\n\n Args:\n datamodel_name: `str` optional\n Carol datamodel name\n merge_records: `bool` optional\n Merge cds data. Defaults to True.\n columns: `list of string` optional\n Datamodel's columns. Defaults to None (return all columns).\n return_metadata: `bool` optional \n Return Carol metadata columns. Defaults to False.\n callback: `function` optional\n Callback function to handle data. Defaults to None.\n max_workers: `int` optional\n Number of worker used to process. 
Defaults to 30.\n\n Returns: `pandas.DataFrame`\n DataFrame with the staging data.\n \"\"\"\n\n df = self.carol.datamodel.fetch_parquet(\n dm_name=datamodel_name, max_workers=max_workers,\n backend='pandas', return_dask_graph=False, columns=columns, merge_records=merge_records, \n return_metadata=return_metadata, max_hits=None, callback=callback , cds=True,\n file_pattern=None)\n\n return df\n\n def get_datamodel_relationship_constraints(self, dm_list=None):\n \"\"\"\n Create relationship between data models based on their relationship constraints\n Args:\n carol: `pycarol.Carol`\n CarolAPI() object.\n prefix: 'str` default `DM_`\n prefix to add to the data model name. e.g., \n if dm_name='mydatamoldel', the result will be \"DM_mydatamoldel`\n Returns: `defaultdict(set)`\n dictionary { \"dm1\" : {\"dm2\": \"field_dm_1\" : \"field_dm_2\"}}\n \"\"\"\n \n # find Relationship Constraints\n if dm_list is None:\n dms = self.carol.datamodel.get_all().template_dict.keys()\n else:\n dms = dm_list\n relationship_constraints = defaultdict(list)\n for i in dms:\n snap = self.carol.datamodel.get_by_name(i)['mdmRelationshipConstraints']\n if snap:\n relationship_constraints[i].append({i[\"mdmTargetEntityName\"]:i[\"mdmSourceTargetFieldName\"] for i in snap})\n return relationship_constraints\n\n def process_staging(self, stagings_list):\n \"\"\" Process a list of staging tables\n\n Args:\n stagings_list `list str`: List of stagings name\n \"\"\"\n\n for staging_name in stagings_list:\n print(f'adding process staging task to staging: {staging_name} ')\n self.carol.cds_staging.process_data(staging_name, connector_name='protheus_carol', recursive_processing=False)\n print(f'see more in https://{self.carol.organization}.{self.carol.environment}/{self.carol.domain}/carol-ui/tasks')\n\n \n def get_carol_record_count(self):\n \"\"\" Get carol record count from tenant explore stats\n\n Returns:\n `dict`\n Dict with datamodels stats\n \"\"\"\n response = self.carol.call_api(path=f'v1/dashboard/exploreStatistics?days=3', method='GET')\n\n return response[\"exploreStats\"]\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
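`get_datamodel_relationship_constraints` in the row above collapses Carol's `mdmRelationshipConstraints` payloads into a per-datamodel lookup of target entity to source field. A minimal offline sketch of that reshaping follows; the payload here is invented for illustration and is not a real Carol response.

```python
from collections import defaultdict

# Hypothetical payloads, shaped like the 'mdmRelationshipConstraints' entries
# the method reads from Carol; names and fields are made up.
snapshots = {
    "invoice": [
        {"mdmTargetEntityName": "customer", "mdmSourceTargetFieldName": "customer_id"},
        {"mdmTargetEntityName": "order", "mdmSourceTargetFieldName": "order_id"},
    ],
    "payment": [
        {"mdmTargetEntityName": "invoice", "mdmSourceTargetFieldName": "invoice_id"},
    ],
}

relationship_constraints = defaultdict(list)
for dm_name, snap in snapshots.items():
    if snap:  # skip data models without constraints, as the method does
        relationship_constraints[dm_name].append(
            {c["mdmTargetEntityName"]: c["mdmSourceTargetFieldName"] for c in snap}
        )

print(dict(relationship_constraints))
# {'invoice': [{'customer': 'customer_id', 'order': 'order_id'}],
#  'payment': [{'invoice': 'invoice_id'}]}
```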
qifwa493/Camera_surface_area | [
"ebac18bd01ba7c615be63626aeb30bd9e07f53bb"
] | [
"versions/V1.2/Components/py_getContour.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Functions for finding a possible contour in the target image\n\nimport cv2\nimport numpy as np\n\n\ndef showImg(winName, mat, Width=None, Height=None):\n # Get image size\n if Width is None or Height is None:\n Height, Width = mat.shape[:2]\n\n # Display image\n cv2.namedWindow(winName, 0)\n cv2.resizeWindow(winName, Width, Height)\n cv2.imshow(winName, mat)\n\n\ndef findContours(Image, MinArcLength=30, Hull=False, Background=True):\n gray = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)\n\n if Background:\n black = Image\n else:\n size = gray.shape[:2]\n black = np.zeros([size[0], size[1], 3], dtype=np.uint8)\n\n # Canny edge detection\n gray = cv2.bilateralFilter(gray, 9, 75, 75)\n meat = cv2.Canny(gray, 30, 60, L2gradient=True)\n # kernel = np.ones((7, 7), np.uint8)\n # meat = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)\n\n # Find contours\n # showImg('meat', meat, 900, 600)\n # cv2.waitKey(1)\n contours, hierarchy = cv2.findContours(meat, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n maxContour = contours[0]\n maxLength = 0\n count = 0\n filteredContours = []\n for c in contours:\n length = cv2.arcLength(c, True)\n\n\n if 7000 < length < 7500:\n maxContour = c\n\n\n # Find the long contour\n '''if length > MinArcLength:\n if length > maxLength:\n maxContour = c\n maxLength = length\n\n # Find all the contours that longer than the minimum arc length\n if length > MinArcLength:\n print('Contour ' + str(count) + ': ' + '{:.3f}'.format(length))\n print('Hierarchy: ' + str(hierarchy[0][count]))\n if Hull:\n c = cv2.convexHull(c)\n\n # Draw the contours\n temp = c[0]\n firstPoint = c[0]\n for point in c:\n cv2.line(black, (temp[0][0], temp[0][1]), (point[0][0], point[0][1]), (0, 0, 255), 3, lineType=cv2.LINE_AA)\n temp = point\n cv2.line(black, (temp[0][0], temp[0][1]), (firstPoint[0][0], firstPoint[0][1]), (0, 0, 255), 3, lineType=cv2.LINE_AA)\n\n # black = cv2.drawContours(black, hull, -1, (0, 0, 255), 3)\n\n showImg('temp', black)\n cv2.waitKey(0)\n\n count += 1\n # if count > 4:\n # break\n\n filteredContours.append(c)'''\n\n # Draw the contours\n print('Contour length: ' + '{:.3f}'.format(cv2.arcLength(maxContour, True)))\n temp = maxContour[0]\n firstPoint = maxContour[0]\n for i in range(len(maxContour)):\n point = maxContour[i]\n cv2.line(black, (temp[0][0], temp[0][1]), (point[0][0], point[0][1]), (255, 255, 255), 1)\n temp = point\n cv2.line(black, (temp[0][0], temp[0][1]), (firstPoint[0][0], firstPoint[0][1]), (255, 255, 255), 1)\n\n return black, maxContour\n\n\nif __name__ == '__main__':\n fileName = 'DSC_0631_after.jpg'\n image = cv2.imread(fileName)\n\n res, contours = findContours(image, Hull=False, MinArcLength=1000, Background=False)\n\n # cv2.imwrite(fileName.split('.')[0] + '_edges.jpg', res)\n showImg('res', res, 900, 600)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
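The file above selects a contour by the arc length of its Canny edge. Here is a self-contained sketch of the same bilateral-filter, Canny, `findContours`, `arcLength` chain on a synthetic image, so no input file is needed; the filter and Canny thresholds are the ones the file uses, but tuned values are image-specific.

```python
import cv2
import numpy as np

# Synthetic test image: a filled gray circle on a black background
img = np.zeros((400, 400, 3), dtype=np.uint8)
cv2.circle(img, (200, 200), 120, (200, 200, 200), -1)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 9, 75, 75)        # denoise while keeping edges
edges = cv2.Canny(gray, 30, 60, L2gradient=True)

# OpenCV 4.x returns (contours, hierarchy); 3.x inserts the image as a first value
contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Pick the longest closed contour, the role the arc-length test plays above
longest = max(contours, key=lambda c: cv2.arcLength(c, True))
print(f"{len(contours)} contours; longest arc length: "
      f"{cv2.arcLength(longest, True):.1f}")        # roughly 2*pi*120, about 754
```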
luoyan407/predict_trustworthiness_smallscale | [
"b7e1e2a68b0aee9b484228d1b5686f7252919e97",
"b7e1e2a68b0aee9b484228d1b5686f7252919e97",
"b7e1e2a68b0aee9b484228d1b5686f7252919e97"
] | [
"confidnet/models/segnet_selfconfid.py",
"confidnet/models/vgg16_oodconfid.py",
"confidnet/loaders/loader.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom confidnet.models.model import AbstractModel\nfrom confidnet.models.segnet import segnetDown2, segnetDown3, segnetUp2, segnetUp3\n\n\nclass SegnetSelfConfid(AbstractModel):\n def __init__(self, config_args, device):\n super().__init__(config_args, device)\n self.in_channels = config_args[\"data\"][\"input_channels\"]\n self.n_classes = config_args[\"data\"][\"num_classes\"]\n self.is_unpooling = True\n self.dropout = config_args[\"model\"][\"is_dropout\"]\n\n self.down1 = segnetDown2(self.in_channels, 64)\n self.down2 = segnetDown2(64, 128)\n self.down3 = segnetDown3(128, 256)\n self.dropout_down3 = nn.Dropout(0.5)\n self.down4 = segnetDown3(256, 512)\n self.dropout_down4 = nn.Dropout(0.5)\n self.down5 = segnetDown3(512, 512)\n self.dropout_down5 = nn.Dropout(0.5)\n\n self.up5 = segnetUp3(512, 512)\n self.dropout_up5 = nn.Dropout(0.5)\n self.up4 = segnetUp3(512, 256)\n self.dropout_up4 = nn.Dropout(0.4)\n self.up3 = segnetUp3(256, 128)\n self.dropout_up3 = nn.Dropout(0.3)\n self.up2 = segnetUp2(128, 64)\n self.up1 = segnetUp2(64, self.n_classes)\n\n self.unpool_uncertainty = nn.MaxUnpool2d(2, 2)\n self.uncertainty1 = nn.Conv2d(64, 400, 3, 1, 1)\n self.uncertainty2 = nn.Conv2d(400, 120, 3, 1, 1)\n self.uncertainty3 = nn.Conv2d(120, 64, 3, 1, 1)\n self.uncertainty4 = nn.Conv2d(64, 64, 3, 1, 1)\n self.uncertainty5 = nn.Conv2d(64, 1, 3, 1, 1)\n\n def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.down1(inputs)\n down2, indices_2, unpool_shape2 = self.down2(down1)\n down3, indices_3, unpool_shape3 = self.down3(down2)\n if self.dropout:\n if self.mc_dropout:\n down3 = F.dropout(down3, 0.5, training=self.training)\n else:\n down3 = self.dropout_down3(down3)\n down4, indices_4, unpool_shape4 = self.down4(down3)\n if self.dropout:\n if self.mc_dropout:\n down4 = F.dropout(down4, 0.5, training=self.training)\n else:\n down4 = self.dropout_down3(down4)\n down5, indices_5, unpool_shape5 = self.down5(down4)\n if self.dropout:\n if self.mc_dropout:\n down5 = F.dropout(down5, 0.5, training=self.training)\n else:\n down5 = self.dropout_down3(down5)\n\n up5 = self.up5(down5, indices_5, unpool_shape5)\n if self.dropout:\n if self.mc_dropout:\n up5 = F.dropout(up5, 0.5, training=self.training)\n else:\n up5 = self.dropout_up5(up5)\n up4 = self.up4(up5, indices_4, unpool_shape4)\n if self.dropout:\n if self.mc_dropout:\n up4 = F.dropout(up4, 0.5, training=self.training)\n else:\n up4 = self.dropout_up4(up4)\n up3 = self.up3(up4, indices_3, unpool_shape3)\n if self.dropout:\n if self.mc_dropout:\n up3 = F.dropout(up3, 0.5, training=self.training)\n else:\n up3 = self.dropout_up3(up3)\n up2 = self.up2(up3, indices_2, unpool_shape2)\n up1 = self.up1(up2, indices_1, unpool_shape1)\n\n uncertainty = self.unpool_uncertainty(up2, indices_1, unpool_shape1)\n uncertainty = F.relu(self.uncertainty1(uncertainty))\n uncertainty = F.relu(self.uncertainty2(uncertainty))\n uncertainty = F.relu(self.uncertainty3(uncertainty))\n uncertainty = F.relu(self.uncertainty4(uncertainty))\n uncertainty = self.uncertainty5(uncertainty)\n\n return up1, uncertainty\n\n def print_summary(self, input_size):\n pass\n",
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\nfrom confidnet.models.model import AbstractModel\nfrom confidnet.models.vgg16 import Conv2dSame\n\n\nclass VGG16OODConfid(AbstractModel):\n def __init__(self, config_args, device):\n super().__init__(config_args, device)\n self.conv1 = Conv2dSame(config_args[\"data\"][\"input_channels\"], 64, 3)\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv1_dropout = nn.Dropout(0.3)\n self.conv2 = Conv2dSame(64, 64, 3)\n self.conv2_bn = nn.BatchNorm2d(64)\n self.maxpool1 = nn.MaxPool2d(2)\n\n self.conv3 = Conv2dSame(64, 128, 3)\n self.conv3_bn = nn.BatchNorm2d(128)\n self.conv3_dropout = nn.Dropout(0.4)\n self.conv4 = Conv2dSame(128, 128, 3)\n self.conv4_bn = nn.BatchNorm2d(128)\n self.maxpool2 = nn.MaxPool2d(2)\n\n self.conv5 = Conv2dSame(128, 256, 3)\n self.conv5_bn = nn.BatchNorm2d(256)\n self.conv5_dropout = nn.Dropout(0.4)\n self.conv6 = Conv2dSame(256, 256, 3)\n self.conv6_bn = nn.BatchNorm2d(256)\n self.conv6_dropout = nn.Dropout(0.4)\n self.conv7 = Conv2dSame(256, 256, 3)\n self.conv7_bn = nn.BatchNorm2d(256)\n self.maxpool3 = nn.MaxPool2d(2)\n\n self.conv8 = Conv2dSame(256, 512, 3)\n self.conv8_bn = nn.BatchNorm2d(512)\n self.conv8_dropout = nn.Dropout(0.4)\n self.conv9 = Conv2dSame(512, 512, 3)\n self.conv9_bn = nn.BatchNorm2d(512)\n self.conv9_dropout = nn.Dropout(0.4)\n self.conv10 = Conv2dSame(512, 512, 3)\n self.conv10_bn = nn.BatchNorm2d(512)\n self.maxpool4 = nn.MaxPool2d(2)\n\n self.conv11 = Conv2dSame(512, 512, 3)\n self.conv11_bn = nn.BatchNorm2d(512)\n self.conv11_dropout = nn.Dropout(0.4)\n self.conv12 = Conv2dSame(512, 512, 3)\n self.conv12_bn = nn.BatchNorm2d(512)\n self.conv12_dropout = nn.Dropout(0.4)\n self.conv13 = Conv2dSame(512, 512, 3)\n self.conv13_bn = nn.BatchNorm2d(512)\n self.maxpool5 = nn.MaxPool2d(2)\n\n self.end_dropout = nn.Dropout(0.5)\n\n self.fc1 = nn.Linear(512, 512)\n self.dropout_fc = nn.Dropout(0.5)\n self.fc2 = nn.Linear(512, config_args[\"data\"][\"num_classes\"])\n\n self.uncertainty1 = nn.Linear(512, 1)\n\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = self.conv1_bn(out)\n out = self.conv1_dropout(out)\n out = F.relu(self.conv2(out))\n out = self.conv2_bn(out)\n out = self.maxpool1(out)\n\n out = F.relu(self.conv3(out))\n out = self.conv3_bn(out)\n out = self.conv3_dropout(out)\n out = F.relu(self.conv4(out))\n out = self.conv4_bn(out)\n out = self.maxpool2(out)\n\n out = F.relu(self.conv5(out))\n out = self.conv5_bn(out)\n out = self.conv5_dropout(out)\n out = F.relu(self.conv6(out))\n out = self.conv6_bn(out)\n out = self.conv6_dropout(out)\n out = F.relu(self.conv7(out))\n out = self.conv7_bn(out)\n out = self.maxpool3(out)\n\n out = F.relu(self.conv8(out))\n out = self.conv8_bn(out)\n out = self.conv8_dropout(out)\n out = F.relu(self.conv9(out))\n out = self.conv9_bn(out)\n out = self.conv9_dropout(out)\n out = F.relu(self.conv10(out))\n out = self.conv10_bn(out)\n out = self.maxpool4(out)\n\n out = F.relu(self.conv11(out))\n out = self.conv11_bn(out)\n out = self.conv11_dropout(out)\n out = F.relu(self.conv12(out))\n out = self.conv12_bn(out)\n out = self.conv12_dropout(out)\n out = F.relu(self.conv13(out))\n out = self.conv13_bn(out)\n out = self.maxpool5(out)\n\n out = self.end_dropout(out)\n out = out.view(out.size(0), -1)\n out = F.relu(self.fc1(out))\n out = self.dropout_fc(out)\n\n uncertainty = self.uncertainty1(out)\n pred = self.fc2(out)\n\n return pred, uncertainty\n\n def init_vgg16_params(self):\n vgg16 = 
models.vgg16(pretrained=True).to(self.device)\n vgg_layers = []\n for _layer in vgg16.features.children():\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n model_layers = [\n self.conv1,\n self.conv2,\n self.conv3,\n self.conv4,\n self.conv5,\n self.conv6,\n self.conv7,\n self.conv8,\n self.conv9,\n self.conv10,\n self.conv11,\n self.conv12,\n self.conv13,\n ]\n\n assert len(vgg_layers) == len(model_layers)\n\n for l1, l2 in zip(vgg_layers, model_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n",
"from pathlib import Path\nimport numpy as np\nimport torch\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nfrom confidnet.augmentations import get_composed_augmentations\nfrom confidnet.utils.logger import get_logger\n\nLOGGER = get_logger(__name__, level=\"DEBUG\")\n\n\nclass AbstractDataLoader: \n def __init__(self, config_args):\n self.output_folder = config_args['training']['output_folder']\n self.data_dir = config_args['data']['data_dir']\n self.batch_size = config_args['training']['batch_size']\n self.img_size = (config_args['data']['input_size'][0],\n config_args['data']['input_size'][1],\n config_args['data']['input_channels'])\n self.augmentations = config_args['training'].get('augmentations', None)\n self.ft_on_val = config_args['training'].get('ft_on_val', None)\n self.resume_folder = config_args['model']['resume'].parent if isinstance(config_args['model']['resume'], Path) else None\n self.valid_size = config_args['data']['valid_size']\n self.perturbed_folder = config_args['data'].get('perturbed_images', None)\n self.pin_memory = config_args['training']['pin_memory']\n self.num_workers = config_args['training']['num_workers']\n self.train_loader, self.val_loader, self.test_loader = None, None, None\n\n # Set up augmentations\n self.augmentations_train, self.augmentations_train_lbl = None, None\n self.augmentations_test, self.augmentations_test_lbl = None, None\n if self.augmentations:\n LOGGER.info(\"--- Augmentations ---\")\n self.add_augmentations()\n\n # Load dataset\n self.train_dataset, self.val_dataset, self.test_dataset = None, None, None\n self.load_dataset()\n\n def add_augmentations(self):\n self.augmentations_train = get_composed_augmentations(\n self.augmentations, training=\"classif\"\n )\n self.augmentations_train_lbl = get_composed_augmentations(\n {\n key: self.augmentations[key]\n for key in self.augmentations\n if key not in [\"normalize\", \"color_jitter\"]\n },\n verbose=False,\n training=\"classif\",\n )\n self.augmentations_test = get_composed_augmentations(\n {key: self.augmentations[key] for key in self.augmentations if key == \"normalize\"},\n verbose=False,\n training=\"classif\",\n )\n self.augmentations_test_lbl = get_composed_augmentations(\n None, verbose=False, training=\"classif\"\n )\n\n def load_dataset(self):\n pass\n\n def make_loaders(self):\n self.test_loader = torch.utils.data.DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n pin_memory=self.pin_memory,\n num_workers=self.num_workers,\n )\n\n if self.valid_size == 0:\n LOGGER.warning(\"Valid size=0, no validation loader\")\n self.train_loader = torch.utils.data.DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n pin_memory=self.pin_memory,\n num_workers=self.num_workers,\n )\n else:\n num_train = len(self.train_dataset)\n indices = list(range(num_train))\n\n if (self.output_folder / \"train_idx.npy\").exists():\n LOGGER.warning(\"Loading existing train-val split indices\")\n train_idx = np.load(self.output_folder / \"train_idx.npy\")\n val_idx = np.load(self.output_folder / \"val_idx.npy\")\n # Splitting indices\n elif self.resume_folder:\n LOGGER.warning(\"Loading existing train-val split indices from ORIGINAL training\")\n train_idx = np.load(self.resume_folder / \"train_idx.npy\")\n val_idx = np.load(self.resume_folder / \"val_idx.npy\")\n else:\n split = int(np.floor(self.valid_size * num_train))\n np.random.seed(42)\n np.random.shuffle(indices)\n train_idx, val_idx = indices[split:], indices[:split]\n 
np.save(self.output_folder / \"train_idx.npy\", train_idx)\n np.save(self.output_folder / \"val_idx.npy\", val_idx)\n # Make samplers\n train_sampler = SubsetRandomSampler(train_idx)\n val_sampler = SubsetRandomSampler(val_idx)\n # Special case where val set is used for training\n if self.ft_on_val:\n LOGGER.warning(\"Using val set as training\")\n train_sampler = val_sampler\n # Make loaders\n self.train_loader = torch.utils.data.DataLoader(\n dataset=self.train_dataset,\n batch_size=self.batch_size,\n sampler=train_sampler,\n pin_memory=self.pin_memory,\n num_workers=self.num_workers,\n )\n self.val_loader = torch.utils.data.DataLoader(\n dataset=self.train_dataset,\n batch_size=self.batch_size,\n sampler=val_sampler,\n pin_memory=self.pin_memory,\n num_workers=self.num_workers,\n )\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.Conv2d",
"torch.nn.functional.dropout",
"torch.nn.MaxUnpool2d"
],
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Dropout",
"torch.nn.BatchNorm2d"
],
[
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.random.shuffle",
"numpy.save",
"numpy.floor",
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
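Both models in the row above share one pattern: the forward pass returns a `(prediction, uncertainty)` pair, with the confidence head branching off the same shared features as the classifier head. A minimal sketch of that two-head idea, with invented layer sizes rather than the repository's actual architectures:

```python
import torch
import torch.nn as nn

class TwoHeadNet(nn.Module):
    """Shared trunk with a class-logits head and a scalar confidence head."""
    def __init__(self, in_dim=32, hidden=64, num_classes=10):
        super().__init__()
        self.trunk = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU())
        self.fc_pred = nn.Linear(hidden, num_classes)  # analogous to fc2 above
        self.fc_conf = nn.Linear(hidden, 1)            # analogous to uncertainty1 above

    def forward(self, x):
        h = self.trunk(x)
        # Both heads see the same features, so the confidence head can learn
        # when the classifier's features are unreliable
        return self.fc_pred(h), self.fc_conf(h)

net = TwoHeadNet()
pred, conf = net(torch.randn(4, 32))
print(pred.shape, conf.shape)  # torch.Size([4, 10]) torch.Size([4, 1])
```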
ChitandaXu/ECG_classify | [
"bffd810dd7c0a03c18dfc58d3150c7b98b528105"
] | [
"ecg_classify/gen_data.py"
] | [
"import numpy as np\nimport os\nimport pandas as pd\nfrom ecg_classify.constants import DIM, heartbeat_factory, CLASS_NUM, TRAIN_SIZE, TEST_SIZE, LABEL_LIST\nfrom ecg_classify.gen_feature import gen_feature\n\n\ndef read_data(force=False):\n if (not (os.path.isfile('train.csv') and os.path.isfile('test.csv'))) or force:\n __write_data(True)\n __write_data(False)\n df_train = pd.read_csv('train.csv')\n df_test = pd.read_csv('test.csv')\n return df_train, df_test\n\n\ndef gen_data(symbol, is_training=True):\n heartbeat = heartbeat_factory(symbol, is_training)\n if is_training:\n num_list = list(heartbeat.keys())\n res = np.empty((4000, DIM), dtype='<U32')\n else:\n num_list = list(heartbeat.keys())\n res = np.empty((1000, DIM), dtype='<U32')\n cur = 0\n for num in num_list:\n feature = gen_feature(num)\n val = heartbeat[num]\n res[cur: cur + val] = feature[feature[:, -1] == symbol][0: val]\n cur = cur + val\n if symbol == 'A' or (symbol == '/' and is_training):\n half = res.shape[0] // 2\n res = res[0: half]\n res = np.concatenate([res, res])\n return res\n\n\ndef gen_label(is_training_set=True):\n if is_training_set:\n scale = TRAIN_SIZE\n else:\n scale = TEST_SIZE\n labels = np.zeros(scale * CLASS_NUM)\n for i in range(CLASS_NUM):\n labels[scale * i: scale * (i + 1)] = i\n return labels\n\n\ndef __write_data(is_training=True):\n if is_training:\n scale = TRAIN_SIZE\n else:\n scale = TEST_SIZE\n res = np.empty((scale * CLASS_NUM, DIM), dtype='<U32')\n for i in range(CLASS_NUM):\n res[scale * i: scale * (i + 1)] = gen_data(LABEL_LIST[i], is_training)\n df = pd.DataFrame(res)\n if is_training:\n df.to_csv(\"train.csv\", index=False)\n else:\n df.to_csv(\"test.csv\", index=False)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
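`gen_label` in the row above fills the label vector class-block by class-block. The same layout can be produced in one `np.repeat` call, which is a quick way to sanity-check the loop; the sizes below are assumptions standing in for the module's `CLASS_NUM` and `TRAIN_SIZE` constants.

```python
import numpy as np

CLASS_NUM, TRAIN_SIZE = 5, 4000  # assumed values, not taken from the module

# Loop version, as in gen_label()
labels = np.zeros(TRAIN_SIZE * CLASS_NUM)
for i in range(CLASS_NUM):
    labels[TRAIN_SIZE * i: TRAIN_SIZE * (i + 1)] = i

# Vectorised equivalent: [0]*4000 + [1]*4000 + ... as one array
assert np.array_equal(labels, np.repeat(np.arange(CLASS_NUM), TRAIN_SIZE))
```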
rodrigob/beam | [
"e2ce4037f85619f946b3d6a3a90955cdf1c19b4a"
] | [
"sdks/python/apache_beam/examples/complete/distribopt.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Example illustrating the use of Apache Beam for solving distributing\noptimization tasks.\n\nThis example solves an optimization problem which consists of distributing a\nnumber of crops to grow in several greenhouses. The decision where to grow the\ncrop has an impact on the production parameters associated with the greenhouse,\nwhich affects the total cost of production at the greenhouse. Additionally,\neach crop needs to be transported to a customer so the decision where to grow\nthe crop has an impact on the transportation costs as well.\n\nThis type of optimization problems are known as mixed-integer programs as they\nexist of discrete parameters (do we produce a crop in greenhouse A, B or C?)\nand continuous parameters (the greenhouse production parameters).\n\nRunning this example requires NumPy and SciPy. The input consists of a CSV file\nwith the following columns (Tx representing the transporation cost/unit if the\ncrop is produced in greenhouse x): Crop name, Quantity, Ta, Tb, Tc, ....\n\nExample input file with 5 crops and 3 greenhouses (a transporation cost of 0\nforbids production of the crop in a greenhouse):\nOP01,8,12,0,12\nOP02,30,14,3,12\nOP03,25,7,3,14\nOP04,87,7,2,2\nOP05,19,1,7,10\n\nThe pipeline consists of three phases:\n - Creating a grid of mappings (assignment of each crop to a greenhouse)\n - For each mapping and each greenhouse, optimization of the production\n parameters for cost, addition of the transporation costs, and aggregation\n of the costs for each mapping.\n - Selecting the mapping with the lowest cost.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport logging\nimport string\nimport uuid\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport apache_beam as beam\nfrom apache_beam import pvalue\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom scipy.optimize import minimize\n\n\nclass Simulator(object):\n \"\"\"Greenhouse simulation for the optimization of greenhouse parameters.\"\"\"\n\n def __init__(self, quantities):\n super(Simulator, self).__init__()\n self.quantities = np.atleast_1d(quantities)\n\n self.A = np.array([[3.0, 10, 30],\n [0.1, 10, 35],\n [3.0, 10, 30],\n [0.1, 10, 35]])\n\n self.P = 1e-4 * np.array([[3689, 1170, 2673],\n [4699, 4387, 7470],\n [1091, 8732, 5547],\n [381, 5743, 8828]])\n\n a0 = np.array([[1.0, 1.2, 3.0, 3.2]])\n coeff = np.sum(np.cos(np.dot(a0.T, self.quantities[None, :])), axis=1)\n self.alpha = coeff / np.sum(coeff)\n\n def simulate(self, xc):\n # Map the input parameter to a cost for each crop.\n weighted_distance = np.sum(self.A * np.square(xc - self.P), axis=1)\n f = 
-np.sum(self.alpha * np.exp(-weighted_distance))\n return np.square(f) * np.log(self.quantities)\n\n\nclass CreateGrid(beam.PTransform):\n \"\"\"A transform for generating the mapping grid.\n\n Input: Formatted records of the input file, e.g.,\n {\n 'crop': 'OP009',\n 'quantity': 102,\n 'transport_costs': [('A', None), ('B', 3), ('C', 8)]\n }\n Output: tuple (mapping_identifier, {crop -> greenhouse})\n \"\"\"\n\n class PreGenerateMappings(beam.DoFn):\n \"\"\"ParDo implementation forming based on two elements a small sub grid.\n\n This facilitates parallellization of the grid generation.\n Emits two PCollections: the subgrid represented as collection of lists of\n two tuples, and a list of remaining records. Both serve as an input to\n GenerateMappings.\n \"\"\"\n\n def process(self, element):\n records = list(element[1])\n # Split of 2 crops and pre-generate the subgrid.\n # Select the crop with highest number of possible greenhouses:\n # in case two crops with only a single possible greenhouse were selected\n # the subgrid would consist of only 1 element.\n best_split = np.argsort([-len(r['transport_costs']) for r in records])[:2]\n rec1 = records[best_split[0]]\n rec2 = records[best_split[1]]\n\n # Generate & emit all combinations\n for a in rec1['transport_costs']:\n if a[1]:\n for b in rec2['transport_costs']:\n if b[1]:\n combination = [(rec1['crop'], a[0]), (rec2['crop'], b[0])]\n yield pvalue.TaggedOutput('splitted', combination)\n\n # Pass on remaining records\n remaining = [rec for i, rec in enumerate(records) if i not in best_split]\n yield pvalue.TaggedOutput('combine', remaining)\n\n class GenerateMappings(beam.DoFn):\n \"\"\"ParDo implementation to generate all possible mappings.\n\n Input: output of PreGenerateMappings\n Output: tuples of the form (mapping_identifier, {crop -> greenhouse})\n \"\"\"\n\n @staticmethod\n def _coordinates_to_greenhouse(coordinates, greenhouses, crops):\n # Map the grid coordinates back to greenhouse labels\n arr = []\n for coord in coordinates:\n arr.append(greenhouses[coord])\n return dict(zip(crops, np.array(arr)))\n\n def process(self, element, records):\n # Generate available greenhouses and grid coordinates for each crop.\n grid_coordinates = []\n for rec in records:\n # Get indices for available greenhouses (w.r.t crops)\n filtered = [i for i, av in enumerate(rec['transport_costs']) if av[1]]\n grid_coordinates.append(filtered)\n\n # Generate all mappings\n grid = np.vstack(list(map(np.ravel, np.meshgrid(*grid_coordinates)))).T\n crops = [rec['crop'] for rec in records]\n greenhouses = [rec[0] for rec in records[0]['transport_costs']]\n for point in grid:\n # translate back to greenhouse label\n mapping = self._coordinates_to_greenhouse(point, greenhouses, crops)\n assert all(rec[0] not in mapping for rec in element)\n # include the incomplete mapping of 2 crops\n mapping.update(element)\n # include identifier\n yield (uuid.uuid4().hex, mapping)\n\n def expand(self, records):\n o = (\n records\n | 'pair one' >> beam.Map(lambda x: (1, x))\n | 'group all records' >> beam.GroupByKey()\n | 'split one of' >> beam.ParDo(self.PreGenerateMappings())\n .with_outputs('splitted', 'combine')\n )\n\n # Create mappings, and prevent fusion (this limits the parallelization\n # in the optimization step)\n mappings = (\n o.splitted\n | 'create mappings' >> beam.ParDo(self.GenerateMappings(),\n pvalue.AsSingleton(o.combine))\n | 'prevent fusion' >> beam.Reshuffle()\n )\n\n return mappings\n\n\nclass OptimizeGrid(beam.PTransform):\n \"\"\"A transform for 
optimizing all greenhouses of the mapping grid.\"\"\"\n\n class CreateOptimizationTasks(beam.DoFn):\n \"\"\"\n Create tasks for optimization.\n\n Input: (mapping_identifier, {crop -> greenhouse})\n Output: ((mapping_identifier, greenhouse), [(crop, quantity),...])\n \"\"\"\n\n def process(self, element, quantities):\n mapping_identifier, mapping = element\n\n # Create (crop, quantity) lists for each greenhouse\n greenhouses = defaultdict(list)\n for crop, greenhouse in mapping.iteritems():\n quantity = quantities[crop]\n greenhouses[greenhouse].append((crop, quantity))\n\n # Create input for OptimizeProductParameters\n for greenhouse, crops in greenhouses.iteritems():\n key = (mapping_identifier, greenhouse)\n yield (key, crops)\n\n class OptimizeProductParameters(beam.DoFn):\n \"\"\"Solve the optimization task to determine optimal production parameters.\n Input: ((mapping_identifier, greenhouse), [(crop, quantity),...])\n Two outputs:\n - solution: (mapping_identifier, (greenhouse, [production parameters]))\n - costs: (crop, greenhouse, mapping_identifier, cost)\n \"\"\"\n\n @staticmethod\n def _optimize_production_parameters(sim):\n # setup initial starting point & bounds\n x0 = 0.5 * np.ones(3)\n bounds = list(zip(np.zeros(3), np.ones(3)))\n\n # Run L-BFGS-B optimizer\n result = minimize(lambda x: np.sum(sim.simulate(x)), x0, bounds=bounds)\n return result.x.tolist(), sim.simulate(result.x)\n\n def process(self, element):\n mapping_identifier, greenhouse = element[0]\n crops, quantities = zip(*element[1])\n sim = Simulator(quantities)\n optimum, costs = self._optimize_production_parameters(sim)\n solution = (mapping_identifier, (greenhouse, optimum))\n yield pvalue.TaggedOutput('solution', solution)\n for crop, cost, quantity in zip(crops, costs, quantities):\n costs = (crop, greenhouse, mapping_identifier, cost * quantity)\n yield pvalue.TaggedOutput('costs', costs)\n\n def expand(self, inputs):\n mappings, quantities = inputs\n opt = (\n mappings\n | 'optimization tasks' >> beam.ParDo(self.CreateOptimizationTasks(),\n pvalue.AsDict(quantities))\n | 'optimize' >> beam.ParDo(self.OptimizeProductParameters())\n .with_outputs('costs', 'solution')\n )\n return opt\n\n\nclass CreateTransportData(beam.DoFn):\n \"\"\"Transform records to pvalues ((crop, greenhouse), transport_cost)\"\"\"\n\n def process(self, record):\n crop = record['crop']\n for greenhouse, transport_cost in record['transport_costs']:\n yield ((crop, greenhouse), transport_cost)\n\n\ndef add_transport_costs(element, transport, quantities):\n \"\"\"Adds the transport cost for the crop to the production cost.\n\n elements are of the form (crop, greenhouse, mapping, cost), the cost only\n corresponds to the production cost. 
Return the same format, but including\n the transport cost.\n \"\"\"\n crop = element[0]\n cost = element[3]\n # lookup & compute cost\n transport_key = element[:2]\n transport_cost = transport[transport_key] * quantities[crop]\n return element[:3] + (cost + transport_cost,)\n\n\ndef parse_input(line):\n # Process each line of the input file to a dict representing each crop\n # and the transport costs\n columns = line.split(',')\n\n # Assign each greenhouse a character\n transport_costs = []\n for greenhouse, cost in zip(string.ascii_uppercase, columns[2:]):\n info = (greenhouse, int(cost) if cost else None)\n transport_costs.append(info)\n\n return {\n 'crop': columns[0],\n 'quantity': int(columns[1]),\n 'transport_costs': transport_costs\n }\n\n\ndef format_output(element):\n \"\"\"Transforms the datastructure (unpack lists introduced by CoGroupByKey)\n before writing the result to file.\n \"\"\"\n result = element[1]\n result['cost'] = result['cost'][0]\n result['production'] = dict(result['production'])\n result['mapping'] = result['mapping'][0]\n return result\n\n\ndef run(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('--input',\n dest='input',\n required=True,\n help='Input description to process.')\n parser.add_argument('--output',\n dest='output',\n required=True,\n help='Output file to write results to.')\n known_args, pipeline_args = parser.parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n with beam.Pipeline(options=pipeline_options) as p:\n # Parse input file\n records = (\n p\n | 'read' >> beam.io.ReadFromText(known_args.input)\n | 'process input' >> beam.Map(parse_input)\n )\n\n # Create two pcollections, used as side inputs\n transport = (\n records\n | 'create transport' >> beam.ParDo(CreateTransportData())\n )\n\n quantities = (\n records\n | 'create quantities' >> beam.Map(lambda r: (r['crop'], r['quantity']))\n )\n\n # Generate all mappings and optimize greenhouse production parameters\n mappings = records | CreateGrid()\n opt = (mappings, quantities) | OptimizeGrid()\n\n # Then add the transport costs and sum costs per crop.\n costs = (\n opt.costs\n | 'include transport' >> beam.Map(add_transport_costs,\n pvalue.AsDict(transport),\n pvalue.AsDict(quantities))\n | 'drop crop and greenhouse' >> beam.Map(lambda x: (x[2], x[3]))\n | 'aggregate crops' >> beam.CombinePerKey(sum)\n )\n\n # Join cost, mapping and production settings solution on mapping identifier.\n # Then select best.\n join_operands = {\n 'cost': costs,\n 'production': opt.solution,\n 'mapping': mappings\n }\n best = (\n join_operands\n | 'join' >> beam.CoGroupByKey()\n | 'select best' >> beam.CombineGlobally(min, key=lambda x: x[1]['cost'])\n .without_defaults()\n | 'format output' >> beam.Map(format_output)\n )\n\n # pylint: disable=expression-not-assigned\n best | 'write optimum' >> beam.io.WriteToText(known_args.output)\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()\n"
] | [
[
"numpy.square",
"numpy.dot",
"numpy.log",
"numpy.meshgrid",
"numpy.ones",
"numpy.atleast_1d",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
grohalex/Final-Project | [
"41ac4e56e1a688a5f03f81d40d99eb2f839f9a26"
] | [
"Two-Way/stuck_lattice0.py"
] | [
"# first version of two way lattice stuck position heatmap\nimport numpy as np\nimport numpy.random as rd\nimport random as random\nimport scipy\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom os import path\nfrom datetime import datetime\nnow = datetime.now()\n\n#parameters\nN = 100 # number of sites\na1 = 1 # injection probability at lattice 1\na2 = 1 # injection probability at lattice 2\nb1 = 1 # removal probability at lattice 1\nb2 = 1 # removal probability at lattice 2\nk11 = 1 # steping probability for particle 1 in lattice 1\n#k12 = 0.1 # steping probability for particle 1 to lattice 2\n#k21 = 0 # steping probability for particle 2 to lattice 1\nk22 = 1 # steping probability for particle 2 in lattice 2\n\nk12_values = [round(i, 2) for i in np.linspace(0,1,20)]#np.linspace(0,1,11)\nk21_values = np.flip(k12_values)#[round(i, 2) for i in np.linspace(0,1,11)]\nstuck_steps_matrix = np.zeros([ len(k21_values), len(k12_values)]) #init our heat maps\nmax_step = 600\naverages = 50\n#init\nL1 = np.zeros(N) #initialize lattice 1\nL2 = np.zeros(N) #initialize lattice 2\nstep = 0 #init step variable\n#init the current measurement variables 'passed_particles'\npassed_particles1 = 0 #particles which passes 0th site in latice 1\npassed_particles2 = 0 #particles which passed 0th site in latice 2\ncurrent1 = 0\ncurrent2 = 0\n\ndensities1 = np.zeros(N) #average occupation density for each site in lattice 1\ndensities2 = np.zeros(N) #average occupation density for each site in lattice 2\n#densities_a1 = np.zeros(steps_a) # densities corresponding to different initial a\n\n#update ftion\ndef update(i):\n global passed_particles1,passed_particles2\n #insertion a1\n if i==0:\n if L1[0]==0 and rd.rand()<a1:\n L1[0]=1\n passed_particles1 +=1\n #in case there site = 1 there is particle 2 which should leave\n elif i==1 and L1[0]==2 and rd.rand()<b2: #in case there is particle 2 it leaves\n L1[0]=0\n\n #insertion a2\n elif i==N+1:\n if L2[0]==0 and rd.rand()<a2:\n L2[0]=2\n passed_particles2 +=1\n #in case site = 2N+1 and there is particle 1 it leaves\n elif i==N+2 and L2[0]==1 and rd.rand()<b2: #in case there is particle 1 it leaves\n L2[0]=0\n\n #removal b1\n elif i==N and rd.rand()<b1:\n if L1[-1]>0:\n L1[-1]=0\n\n #removal b2\n elif i==2*N+1 and rd.rand()<b2:\n if L2[-1]>0:\n L2[-1]=0\n\n\n #regular site lattice 1\n elif i>0 and i < N:\n i = i-1\n #update particle 1\n if L1[i]==1:\n #make a step\n if L1[i+1]==0 and rd.rand()<k11:\n L1[i]=0\n L1[i+1]=1\n\n #overtake\n elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<k12:\n L1[i]=0\n L2[-i-2]=1\n\n #update particle 2\n if L1[i]==2:\n\n #finish overtaking\n if L2[-i]==0 and rd.rand()<k21:\n L1[i]=0\n L2[-i]=2\n if i == int(N/2): ###\n passed_particles1 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n #continue in the opposite lane\n elif L1[i-1]==0 and rd.rand()<k21:\n L1[i]=0\n L1[i-1]=2\n if i == int(N/2): ###\n passed_particles1 +=1 ### #adding to the current in the second lattice (super coarse)\n\n #regular site lattice 2\n elif i>N and i<2*N+1:\n i = i-N-2\n assert(i>=0 and i<=N)\n\n #update particle 2\n if L2[i]==2:\n #make a step\n if L2[i+1]==0 and rd.rand()<k22:\n L2[i]=0\n L2[i+1]=2\n\n #overtake\n elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<k21:\n L2[i]=0\n L1[-i-2]=2\n\n #update particle 1\n if L2[i]==1:\n #finish overtaking\n if L1[-i]==0 and rd.rand()<k12:\n L2[i]=0\n L1[-i]=1\n if i == int(N/2): ###\n passed_particles2 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n #continue in 
the opposite lane\n elif L2[i-1]==0 and rd.rand()<k12:\n if not i==0:\n L2[i]=0\n L2[i-1]=1\n\n if i == int(N/2): ###\n passed_particles2 +=1 ### #adding to the current in the second lattice (super coarse)\n\n\n#lets you update the lattice with optional parameters. This is useful in the Stuck_position()\ndef update_par(i, A1, A2, B1, B2, K11, K22, K12, K21):\n\n #insertion a1\n if i==0:\n if L1[0]==0 and rd.rand()<A1:\n L1[0]=1\n return 1\n else:\n return 0\n #in case there site = 1 there is particle 2 which should leave\n elif i==1 and L1[0]==2 and rd.rand()<B2: #in case there is particle 2 it leaves\n L1[0]=0\n return 1\n #insertion a2\n elif i==N+1:\n if L2[0]==0 and rd.rand()<A2:\n L2[0]=2\n return 1\n else:\n return 0\n #in case site = 2N+1 and there is particle 1 it leaves\n elif i==N+2 and L2[0]==1 and rd.rand()<B2: #in case there is particle 1 it leaves\n L2[0]=0\n return 1\n #removal b1\n elif i==N and rd.rand()<B1:\n if L1[-1]>0:\n L1[-1]=0\n return 1\n else:\n return 0\n #removal b2\n elif i==2*N+1 and rd.rand()<B1:\n if L2[-1]>0:\n L2[-1]=0\n return 1\n else:\n return 0\n\n #regular site lattice 1\n elif i>0 and i < N:\n i = i-1\n\n if L1[i]==0: #nothing can change with no particle available\n return 0\n\n #update particle 1\n if L1[i]==1:\n #make a step\n if L1[i+1]==0 and rd.rand()<K11:\n L1[i]=0\n L1[i+1]=1\n return 1\n\n #overtake\n elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<K12:\n L1[i]=0\n L2[-i-2]=1\n\n return 1\n else:\n return 0\n #update particle 2\n if L1[i]==2:\n\n if L2[i]==0: #nothing can change with no particle available\n return 0\n\n #finish overtaking\n if L2[-i]==0 and rd.rand()<K21:\n L1[i]=0\n L2[-i]=2\n return 1\n\n #continue in the opposite lane\n elif L1[i-1]==0 and rd.rand()<K21:\n L1[i]=0\n L1[i-1]=2\n return 1\n else:\n return 0\n\n\n\n #regular site lattice 2\n elif i>N:\n i = i-N-2\n assert(i>=0 and i<=N)\n\n #update particle 2\n if L2[i]==2:\n #make a step\n if L2[i+1]==0 and rd.rand()<K22:\n L2[i]=0\n L2[i+1]=2\n return 1\n #overtake\n elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<K21:\n L2[i]=0\n L1[-i-2]=2\n return 1\n else:\n return 0\n #update particle 1\n elif L2[i]==1:\n #finish overtaking\n if L1[-i]==0 and rd.rand()<K12:\n L2[i]=0\n L1[-i]=1\n return 1\n #continue in the opposite lane\n elif L2[i-1]==0 and rd.rand()<K12:\n if not i==0:\n L2[i]=0\n L2[i-1]=1\n return 1\n else:\n return 0\n else:\n return 0\n else:\n return 0\n\n#does not update the lattice but it just outputs bool whether a move is possible. 
This is useful in the Stuck_position()\ndef update_bool(i, A1, A2, B1, B2, K11, K22, K12, K21):\n\n #insertion a1\n if i==0:\n if L1[0]==0 and rd.rand()<A1:\n #L1[0]=1\n return 1\n else:\n return 0\n #in case there site = 1 there is particle 2 which should leave\n elif i==1 and L1[0]==2 and rd.rand()<B2: #in case there is particle 2 it leaves\n #L1[0]=0\n return 1\n #insertion a2\n elif i==N+1:\n if L2[0]==0 and rd.rand()<A2:\n #L2[0]=2\n return 1\n else:\n return 0\n #in case site = 2N+1 and there is particle 1 it leaves\n elif i==N+2 and L2[0]==1 and rd.rand()<B2: #in case there is particle 1 it leaves\n #L2[0]=0\n return 1\n #removal b1\n elif i==N and rd.rand()<B1:\n if L1[-1]>0:\n #L1[-1]=0\n return 1\n else:\n return 0\n #removal b2\n elif i==2*N+1 and rd.rand()<B1:\n if L2[-1]>0:\n #L2[-1]=0\n return 1\n else:\n return 0\n\n #regular site lattice 1\n elif i>0 and i < N:\n i = i-1\n\n if L1[i]==0: #nothing can change with no particle available\n return 0\n\n #update particle 1\n if L1[i]==1:\n #make a step\n if L1[i+1]==0 and rd.rand()<K11:\n #L1[i]=0\n #L1[i+1]=1\n return 1\n\n #overtake\n elif L1[i+1]>0 and L2[-i-2]==0 and rd.rand()<K12:\n #L1[i]=0\n #L2[-i-2]=1\n\n return 1\n else:\n return 0\n #update particle 2\n if L1[i]==2:\n\n if L2[i]==0: #nothing can change with no particle available\n return 0\n\n #finish overtaking\n if L2[-i]==0 and rd.rand()<K21:\n #L1[i]=0\n #L2[-i]=2\n return 1\n\n #continue in the opposite lane\n elif L1[i-1]==0 and rd.rand()<K21:\n #L1[i]=0\n #L1[i-1]=2\n return 1\n else:\n return 0\n\n\n\n #regular site lattice 2\n elif i>N:\n i = i-N-2\n assert(i>=0 and i<=N)\n\n #update particle 2\n if L2[i]==2:\n #make a step\n if L2[i+1]==0 and rd.rand()<K22:\n #L2[i]=0\n #L2[i+1]=2\n return 1\n #overtake\n elif L2[i+1]>0 and L1[-i-2]==0 and rd.rand()<K21:\n #L2[i]=0\n #L1[-i-2]=2\n return 1\n else:\n return 0\n #update particle 1\n elif L2[i]==1:\n #finish overtaking\n if L1[-i]==0 and rd.rand()<K12:\n #L2[i]=0\n #L1[-i]=1\n return 1\n #continue in the opposite lane\n elif L2[i-1]==0 and rd.rand()<K12:\n if not i==0:\n #L2[i]=0\n #L2[i-1]=1\n return 1\n else:\n return 0\n else:\n return 0\n else:\n return 0\n\n#display both lattices, in the correct orientation\ndef Display():\n l1 = L1\n l2 = np.flip(L2) #L2 goes in the opposite direction\n\n print(l1)\n print(l2)\n\n#another way of displaying my two lattices\ndef DisplayNice():\n l1 = L1\n l2 = np.flip(L2) #L2 goes in the opposite direction\n\n for i in l1:\n print('|', end = '')\n if i==1:\n print('o>', end = '')\n if i==2:\n print('<o', end = '')\n elif i==0:\n print(' ', end = '')\n #print('|', end = '')\n print('|', end = '')\n print('\\n')\n\n for i in l2:\n print('|', end = '')\n if i==1:\n print('o>', end = '')\n if i==2:\n print('<o', end = '')\n elif i==0:\n print(' ', end = '')\n #print('|', end = '')\n print('|', end = '')\n print('\\n')\n\n#determines whether lattice is in a stuck position, returns a bool, True=lattice is stuck\ndef stuck_position():\n unstuck = 0\n for i in range(2*N+2):\n unstuck = unstuck + update_bool(i, 1,1,1,1,1,1,1,1)\n if unstuck > 0:\n return False\n if unstuck == 0:\n return True\n\n #this checks whether the update_par returns None (that would be an error)\n#while True:\n# site = rd.randint(0,2*N+2)\n# if update_par(site, 1,1,1,1,1,1,1,1)==None:\n# print('omg no')\n\n###########################################################################\n\nfor i in range(averages):\n #changing k12:\n for ii in range(len(k12_values)):\n k12 = k12_values[ii]\n print(k12)\n for jj in 
range(len(k21_values)):\n k21 = k21_values[jj]\n\n #I should empty the lattice here\n L1 = np.zeros(N) #initialize lattice 1\n L2 = np.zeros(N) #initialize lattice 2\n step = 0 #init step variable\n\n while not stuck_position() and not step > max_step:\n #print('yes')\n step += 1\n for j in range(2*N+2):\n site = rd.randint(0,2*N+2)\n update(site)\n print(\"step:\", step)\n stuck_steps_matrix[jj,ii] += step/averages\n\n\n#save the matrix into a txt\nName = \"stuck_heatmapN%s_moreAverages\"%(N)\nheading = \"parameters step: %s k12_values: %s \\n k21_values %s \\n\"% (len(k12_values), k12_values, k21_values)\ndata = stuck_steps_matrix\ndata = np.array(data)\n#data = np.transpose(data)\n#fmt = \"%-10d\", \"%-10.3f\", \"%-10.3f\"\nnp.savetxt(Name, data, fmt = \"%-10d\", delimiter = \"\\t\", header = heading)\n\n\n\n\n#heat map plot:\nf1 = plt.figure()\nplt.xticks(ticks=np.arange(len(k12_values)),labels=k12_values)\nplt.yticks(ticks=np.arange(len(k21_values)),labels=k21_values)\nplt.title(\"Stuck Lattice heatmap(number of sites = %s, resolution in a = %s, b = %s)\"%(N, len(k12_values), len(k21_values)))\nplt.ylabel(\"k21 value\")\nplt.xlabel(\"k12 value\")\n# save this plot inside a variable called hm\nhm=plt.imshow(stuck_steps_matrix, cmap='hot',interpolation=\"None\")\n# pass this heatmap object into plt.colorbar method.\nplt.colorbar(hm)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.random.randint",
"numpy.random.rand",
"numpy.savetxt",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.flip",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yiruiliu110/eegnn | [
"253773c301681bb00b4789c34f48c82468ad16da"
] | [
"estimation/add_k.py"
] | [
"\"\"\"\nThis script is used to add a new cluster.\n\"\"\"\nimport torch\n\nfrom estimation.stirling_number import stirling_number\n\n\ndef build_injection(count, active_k, max_k, gamma):\n #print('count', count)\n with_sample_indices = count > 0\n\n remaining_indices = torch.squeeze(torch.cat([torch.tensor([True]), with_sample_indices[1::]], dim=0).nonzero())\n\n dict_tmp = {}\n index = 0\n for j in range(1, active_k):\n if with_sample_indices[j]:\n index += 1\n dict_tmp[j] = index\n\n old_active_K = index + 1\n add_number = stirling_number(count[0], gamma)\n\n if old_active_K + add_number <= max_k:\n new_active_K = old_active_K + add_number\n else:\n new_active_K = max_k\n\n def fn(x):\n if x == 0:\n return torch.randint(low=old_active_K, high=new_active_K, size=()).item()\n else:\n return dict_tmp[x]\n\n return fn, new_active_K, remaining_indices\n\n\ndef add_k(c, active_k, max_k, gamma):\n \"\"\"\n replace the cluster indictors of 0 to active_K+1\n :param c: a sparse matrix to indicate the cluster membership.\n :return: a new sparse matrix to indicate the cluster membership.\n \"\"\"\n indices = c._indices()\n values = c._values()\n\n values_one_hot = torch.nn.functional.one_hot(values, num_classes=active_k)\n count = torch.sum(values_one_hot, dim=0)\n\n fn, new_active_K, remaining_indices = build_injection(count, active_k, max_k, gamma)\n\n values = values.apply_(fn)\n\n c = torch.sparse_coo_tensor(indices, values, c.size())\n\n return c, new_active_K, remaining_indices\n\n\ndef switch(inputs, remaining_indices, max_k):\n remaining = torch.index_select(inputs, dim=0, index=remaining_indices)\n deleting_indices = generating_deleting_indices(max_k, remaining_indices)\n deleting = torch.index_select(inputs, dim=0, index=deleting_indices)\n outputs = torch.cat([remaining, deleting], dim=0)\n return outputs\n\n\ndef generating_deleting_indices(max_k, remaining_indices):\n deleting_indices = torch.tensor([int(item) for item in torch.arange(0, max_k) if item not in remaining_indices])\n return deleting_indices\n\n\nif __name__ == \"__main__\":\n\n i = [[0, 1, 1, 2],\n [2, 0, 2, 1]]\n v_c = [0, 1, 2, 0]\n active_K = 3\n c = torch.sparse_coo_tensor(i, v_c, (3, 3))\n\n c_new = add_k(c, active_K, max_k=10, gamma=1)\n print(c_new)"
] | [
[
"torch.randint",
"torch.cat",
"torch.sum",
"torch.sparse_coo_tensor",
"torch.tensor",
"torch.arange",
"torch.nn.functional.one_hot",
"torch.index_select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lemswasabi/transformers | [
"1762ded30a49649bdd5f8f5ee38b46dea051026a"
] | [
"src/transformers/models/wavlm/modeling_wavlm.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch WavLM model.\"\"\"\n\nimport math\nimport warnings\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...modeling_outputs import (\n BaseModelOutput,\n CausalLMOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n Wav2Vec2BaseModelOutput,\n XVectorOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import torch_int_div\nfrom ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_wavlm import WavLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n\n_HIDDEN_STATES_START_POSITION = 2\n\n# General docstring\n_CONFIG_FOR_DOC = \"WavLMConfig\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"patrickvonplaten/wavlm-libri-clean-100h-base-plus\"\n_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = \"'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'\"\n_CTC_EXPECTED_LOSS = 12.51\n\n# Audio class docstring\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n_SEQ_CLASS_CHECKPOINT = \"hf-internal-testing/tiny-random-wavlm\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'no'\" # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n_SEQ_CLASS_EXPECTED_LOSS = 0.7 # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n\n# Frame class docstring\n_FRAME_CLASS_CHECKPOINT = \"microsoft/wavlm-base-plus-sd\"\n_FRAME_EXPECTED_OUTPUT = [0, 0]\n\n# Speaker Verification docstring\n_XVECTOR_CHECKPOINT = \"microsoft/wavlm-base-plus-sv\"\n_XVECTOR_EXPECTED_OUTPUT = 0.97\n\nWAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/wavlm-base\",\n \"microsoft/wavlm-base-plus\",\n \"microsoft/wavlm-large\",\n # See all WavLM models at https://huggingface.co/models?filter=wavlm\n]\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. 
This should be a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension.\n \"\"\"\n batch_size, sequence_length = shape\n\n if mask_length < 1:\n raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n if mask_length > sequence_length:\n raise ValueError(\n f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n f\" and `sequence_length`: {sequence_length}`\"\n )\n\n # epsilon is used for probabilistic rounding\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n\n # make sure num masked span <= sequence_length\n if num_masked_span * mask_length > sequence_length:\n num_masked_span = sequence_length // mask_length\n\n # make sure num_masked span is also <= input_length - (mask_length - 1)\n if input_length - (mask_length - 1) < num_masked_span:\n num_masked_span = max(input_length - (mask_length - 1), 0)\n\n return num_masked_span\n\n # compute number of masked spans in batch\n input_lengths = (\n attention_mask.sum(-1).detach().tolist()\n if attention_mask is not None\n else [sequence_length for _ in range(batch_size)]\n )\n\n # SpecAugment mask to fill\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)\n spec_aug_mask_idxs = []\n\n max_num_masked_span = compute_num_masked_span(sequence_length)\n\n if max_num_masked_span == 0:\n return spec_aug_mask\n\n for input_length in input_lengths:\n # compute num of masked spans for this input\n num_masked_span = compute_num_masked_span(input_length)\n\n # get random indices to mask\n spec_aug_mask_idx = np.random.choice(\n np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n )\n\n # pick first sampled index that will serve as a dummy index to pad vector\n # to ensure same dimension for all batches due to probabilistic rounding\n # Picking first sample just pads those vectors twice.\n if len(spec_aug_mask_idx) == 0:\n # this case can only happen if `input_length` is strictly smaller than\n # `sequence_length` in which case the last token has to be a padding\n # token which we can use as a dummy mask id\n dummy_mask_idx = sequence_length - 1\n else:\n dummy_mask_idx = spec_aug_mask_idx[0]\n\n spec_aug_mask_idx = np.concatenate(\n [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n )\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n # expand masked indices to masked spans\n spec_aug_mask_idxs = np.broadcast_to(\n spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n # add offset to the starting indexes so that the indexes 
now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # ensure that we cannot have indices larger than sequence_length\n if spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMNoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM\nclass WavLMGroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM\nclass WavLMPositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n 
padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n else:\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n\n hidden_states = self.conv(hidden_states)\n hidden_states = self.padding(hidden_states)\n hidden_states = self.activation(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM\nclass WavLMSamePadLayer(nn.Module):\n def __init__(self, num_conv_pos_embeddings):\n super().__init__()\n self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n def forward(self, hidden_states):\n if self.num_pad_remove > 0:\n hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM\nclass WavLMFeatureEncoder(nn.Module):\n \"\"\"Construct the features from raw audio waveform\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n if config.feat_extract_norm == \"group\":\n conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [\n WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)\n ]\n elif config.feat_extract_norm == \"layer\":\n conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]\n else:\n raise ValueError(\n f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n )\n self.conv_layers = nn.ModuleList(conv_layers)\n self.gradient_checkpointing = False\n self._requires_grad = True\n\n def _freeze_parameters(self):\n for param in self.parameters():\n param.requires_grad = False\n self._requires_grad = False\n\n def forward(self, input_values):\n hidden_states = input_values[:, None]\n\n # make sure hidden_states require grad for gradient_checkpointing\n if self._requires_grad and self.training:\n hidden_states.requires_grad = True\n\n for conv_layer in self.conv_layers:\n if self._requires_grad and self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(conv_layer),\n hidden_states,\n )\n else:\n hidden_states = conv_layer(hidden_states)\n\n return hidden_states\n\n\nclass WavLMFeatureExtractor(WavLMFeatureEncoder):\n def __init__(self, config):\n super().__init__(config)\n warnings.warn(\n f\"The class `{self.__class__.__name__}` has been depreciated \"\n \"and will be removed in Transformers v5. 
\"\n f\"Use `{self.__class__.__bases__[0].__name__}` instead.\",\n FutureWarning,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM\nclass WavLMFeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\nclass WavLMAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n num_buckets: int = 320,\n max_distance: int = 800,\n has_relative_position_bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n\n self.num_buckets = num_buckets\n self.max_distance = max_distance\n\n self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1))\n self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8)\n\n if has_relative_position_bias:\n self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_bias: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n index=0,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Attention layer with relative attention\"\"\"\n bsz, tgt_len, _ = hidden_states.size()\n\n # first pass of attention layer creates position bias\n if position_bias is None:\n position_bias = self.compute_bias(tgt_len, tgt_len)\n position_bias = (\n position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len)\n )\n\n # Compute relative position bias:\n # 1) get reshape hidden_states\n gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1))\n gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3)\n\n # 2) project hidden states\n relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states)\n relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1)\n\n # 3) compute gate for position bias from projected hidden states\n gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1)\n gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0\n\n # 4) apply gate to position bias to compute gated position_bias\n gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias\n gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len))\n\n attn_output, attn_weights = 
self.torch_multi_head_self_attention(\n hidden_states, attention_mask, gated_position_bias, output_attentions\n )\n\n return attn_output, attn_weights, position_bias\n\n def torch_multi_head_self_attention(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Union[torch.LongTensor, torch.BoolTensor],\n gated_position_bias: torch.FloatTensor,\n output_attentions: bool,\n ) -> (torch.FloatTensor, torch.FloatTensor):\n \"\"\"simple wrapper around torch's multi_head_attention_forward function\"\"\"\n # self-attention assumes q = k = v\n query = key = value = hidden_states.transpose(0, 1)\n key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None\n\n # disable bias and add_zero_attn\n bias_k = bias_v = None\n add_zero_attn = False\n\n # PyTorch 1.3.0 has F.multi_head_attention_forward defined\n # so no problem with backwards compatibility\n attn_output, attn_weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n bias_k,\n bias_v,\n add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n output_attentions,\n gated_position_bias,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n\n # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...]\n attn_output = attn_output.transpose(0, 1)\n\n if attn_weights is not None:\n # IMPORTANT: Attention weights are averaged weights\n # here which should not be the case. This is an open issue\n # on PyTorch: https://github.com/pytorch/pytorch/issues/32590\n attn_weights = attn_weights[:, None].broadcast_to(\n attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:]\n )\n\n return attn_output, attn_weights\n\n def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor:\n context_position = torch.arange(query_length, dtype=torch.long)[:, None]\n memory_position = torch.arange(key_length, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position\n relative_position_bucket = self._relative_positions_bucket(relative_position)\n relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device)\n values = self.rel_attn_embed(relative_position_bucket)\n values = values.permute([2, 0, 1])\n return values\n\n def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor:\n num_buckets = self.num_buckets // 2\n\n relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets\n relative_positions = torch.abs(relative_positions)\n\n max_exact = num_buckets // 2\n is_small = relative_positions < max_exact\n\n relative_positions_if_large = torch.log(relative_positions.float() / max_exact)\n relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact)\n relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact)\n relative_postion_if_large = (max_exact + relative_positions_if_large).to(torch.long)\n relative_postion_if_large = torch.min(\n relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)\n )\n\n relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)\n return relative_buckets\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM\nclass 
WavLMFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\nclass WavLMEncoderLayer(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0):\n attn_residual = hidden_states\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=index,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + 
self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=i,\n )\n\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass WavLMEncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [\n WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0))\n for i in range(config.num_hidden_layers)\n ]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n 
all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n position_bias=position_bias,\n )\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions\n )\n\n\nclass WavLMGumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. 
See [CATEGORICAL REPARAMETERIZATION WITH\n GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.num_groups = config.num_codevector_groups\n self.num_vars = config.num_codevectors_per_group\n\n if config.codevector_dim % self.num_groups != 0:\n raise ValueError(\n f\"`config.codevector_dim {config.codevector_dim} must be divisible\"\n f\" by `config.num_codevector_groups` {self.num_groups} \"\n \"for concatenation.\"\n )\n\n # storage for codebook variables (codewords)\n self.codevectors = nn.Parameter(\n torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n )\n self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n # can be decayed for training\n self.temperature = 2\n\n @staticmethod\n def _compute_perplexity(probs):\n marginal_probs = probs.mean(dim=0)\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n\n def forward(self, hidden_states):\n batch_size, sequence_length, hidden_size = hidden_states.shape\n\n # project to codevector dim\n hidden_states = self.weight_proj(hidden_states)\n hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n if self.training:\n # sample code vector probs via gumbel in differentiateable way\n codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)\n codevector_probs = codevector_probs.type_as(hidden_states)\n\n # compute perplexity\n codevector_soft_dist = torch.softmax(\n hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n )\n perplexity = self._compute_perplexity(codevector_soft_dist)\n else:\n # take argmax in non-differentiable way\n # comptute hard codevector distribution (one hot)\n codevector_idx = hidden_states.argmax(dim=-1)\n codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n -1, codevector_idx.view(-1, 1), 1.0\n )\n codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n perplexity = self._compute_perplexity(codevector_probs)\n\n codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n # use probs to retrieve codevectors\n codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)\n\n return codevectors, perplexity\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM\nclass WavLMAdapter(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n # feature dim might need to be down-projected\n if config.output_hidden_size != config.hidden_size:\n self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)\n self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)\n else:\n self.proj = self.proj_layer_norm = None\n\n self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers))\n self.layerdrop = config.layerdrop\n\n def forward(self, hidden_states):\n # down project hidden_states if necessary\n if self.proj is not None and self.proj_layer_norm is not None:\n hidden_states = self.proj(hidden_states)\n hidden_states = self.proj_layer_norm(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n\n for 
layer in self.layers:\n layerdrop_prob = np.random.random()\n if not self.training or (layerdrop_prob > self.layerdrop):\n hidden_states = layer(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM\nclass WavLMAdapterLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.output_hidden_size,\n 2 * config.output_hidden_size,\n config.adapter_kernel_size,\n stride=config.adapter_stride,\n padding=1,\n )\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=1)\n\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel with Wav2Vec2->WavLM, wav2vec2->wavlm\nclass WavLMPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = WavLMConfig\n base_model_prefix = \"wavlm\"\n main_input_name = \"input_values\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n # gumbel softmax requires special init\n if isinstance(module, WavLMGumbelVectorQuantizer):\n module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n module.weight_proj.bias.data.zero_()\n nn.init.uniform_(module.codevectors)\n elif isinstance(module, WavLMPositionalConvEmbedding):\n nn.init.normal_(\n module.conv.weight,\n mean=0,\n std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n )\n nn.init.constant_(module.conv.bias, 0)\n elif isinstance(module, WavLMFeatureProjection):\n k = math.sqrt(1 / module.projection.in_features)\n nn.init.uniform_(module.projection.weight, a=-k, b=k)\n nn.init.uniform_(module.projection.bias, a=-k, b=k)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, nn.Conv1d):\n nn.init.kaiming_normal_(module.weight)\n\n if module.bias is not None:\n k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n nn.init.uniform_(module.bias, a=-k, b=k)\n\n def _get_feat_extract_output_lengths(\n self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None\n ):\n \"\"\"\n Computes the output length of the convolutional layers\n \"\"\"\n\n add_adapter = self.config.add_adapter if add_adapter is None else add_adapter\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return torch_int_div(input_length - kernel_size, stride) + 1\n\n for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n if add_adapter:\n for _ in range(self.config.num_adapter_layers):\n input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(\n self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None\n 
):\n # Effectively attention_mask.sum(-1), but not inplace to be able to run\n # on inference mode.\n non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n\n output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)\n output_lengths = output_lengths.to(torch.long)\n\n batch_size = attention_mask.shape[0]\n\n attention_mask = torch.zeros(\n (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n # these two operations make sure that all values before the output lengths idxs are attended to\n attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n return attention_mask\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)):\n module.gradient_checkpointing = value\n\n\nWAVLM_START_DOCSTRING = r\"\"\"\n WavLM was proposed in [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech\n Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu,\n Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian,\n Yao Qian, Jian Wu, Michael Zeng, Furu Wei.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its models (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and\n behavior.\n\n Parameters:\n config ([`WavLMConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nWAVLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`WavLMProcessor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`WavLMProcessor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. 
Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput\nclass WavLMModel(WavLMPreTrainedModel):\n def __init__(self, config: WavLMConfig):\n super().__init__(config)\n self.config = config\n self.feature_extractor = WavLMFeatureEncoder(config)\n self.feature_projection = WavLMFeatureProjection(config)\n\n # model only needs masking vector if mask prob is > 0.0\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = WavLMEncoderStableLayerNorm(config)\n else:\n self.encoder = WavLMEncoder(config)\n\n self.adapter = WavLMAdapter(config) if config.add_adapter else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.feature_extractor._freeze_parameters()\n\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, 
device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Wav2Vec2BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.adapter is not None:\n hidden_states = self.adapter(hidden_states)\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForCTC(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language 
model head. Please \"\n \"instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. \"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = (\n config.output_hidden_size if hasattr(config, \"add_adapter\") and config.add_adapter else config.hidden_size\n )\n self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, CausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. 
Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForSequenceClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)\"\n )\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n 
\"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,\n expected_loss=_SEQ_CLASS_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a frame classification head on top for tasks like Speaker Diarization.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForAudioFrameClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n 
super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)\"\n )\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_FRAME_CLASS_CHECKPOINT,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_FRAME_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n logits = self.classifier(hidden_states)\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return output\n\n return TokenClassifierOutput(\n loss=None,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss\nclass AMSoftmaxLoss(nn.Module):\n def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):\n super(AMSoftmaxLoss, self).__init__()\n self.scale = scale\n self.margin = margin\n self.num_labels = num_labels\n self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, hidden_states, labels):\n labels = labels.flatten()\n weight = nn.functional.normalize(self.weight, dim=0)\n hidden_states = nn.functional.normalize(hidden_states, dim=1)\n cos_theta = torch.mm(hidden_states, weight)\n psi = cos_theta - self.margin\n\n onehot = nn.functional.one_hot(labels, self.num_labels)\n logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)\n loss = self.loss(logits, labels)\n\n return loss\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer\nclass TDNNLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]\n self.out_conv_dim = config.tdnn_dim[layer_id]\n self.kernel_size = config.tdnn_kernel[layer_id]\n self.dilation = config.tdnn_dilation[layer_id]\n\n self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)\n self.activation = nn.ReLU()\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.unsqueeze(1)\n hidden_states = nn.functional.unfold(\n hidden_states,\n (self.kernel_size, self.in_conv_dim),\n stride=(1, self.in_conv_dim),\n dilation=(self.dilation, 1),\n )\n hidden_states = hidden_states.transpose(1, 2)\n hidden_states = self.kernel(hidden_states)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForXVector(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n 
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])\n\n tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]\n self.tdnn = nn.ModuleList(tdnn_layers)\n\n self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)\n self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)\n\n self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n \"\"\"\n Computes the output length of the TDNN layers\n \"\"\"\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return (input_length - kernel_size) // stride + 1\n\n for kernel_size in self.config.tdnn_kernel:\n input_lengths = _conv_out_length(input_lengths, kernel_size, 1)\n\n return input_lengths\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_XVECTOR_CHECKPOINT,\n output_type=XVectorOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_XVECTOR_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, XVectorOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n\n for tdnn_layer in self.tdnn:\n hidden_states = tdnn_layer(hidden_states)\n\n # Statistic Pooling\n if attention_mask is None:\n mean_features = hidden_states.mean(dim=1)\n std_features = hidden_states.std(dim=1)\n else:\n feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))\n tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)\n mean_features = []\n std_features = []\n for i, length in enumerate(tdnn_output_lengths):\n mean_features.append(hidden_states[i, :length].mean(dim=0))\n std_features.append(hidden_states[i, :length].std(dim=0))\n mean_features = torch.stack(mean_features)\n std_features = torch.stack(std_features)\n statistic_pooling = torch.cat([mean_features, std_features], dim=-1)\n\n output_embeddings = self.feature_extractor(statistic_pooling)\n logits = self.classifier(output_embeddings)\n\n loss = None\n if labels is not None:\n loss = self.objective(logits, labels)\n\n if not return_dict:\n output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return XVectorOutput(\n loss=loss,\n logits=logits,\n embeddings=output_embeddings,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"torch.abs",
"torch.nn.init.uniform_",
"torch.nn.functional.softmax",
"torch.nn.functional.glu",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"torch.FloatTensor",
"torch.where",
"torch.full_like",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.mm",
"torch.ones",
"numpy.arange",
"torch.randn",
"torch.backends.cudnn.flags",
"torch.tensor",
"torch.arange",
"torch.nn.GroupNorm",
"numpy.zeros",
"torch.ones_like",
"torch.sigmoid",
"torch.empty",
"torch.nn.init.constant_",
"numpy.put_along_axis",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.log",
"numpy.random.rand",
"torch.nn.Conv1d",
"torch.stack",
"numpy.array",
"torch.nn.functional.ctc_loss",
"torch.nn.functional.normalize",
"numpy.random.random",
"torch.nn.functional.log_softmax",
"torch.nn.utils.weight_norm",
"torch.nn.LayerNorm",
"numpy.ones",
"numpy.random.uniform",
"numpy.broadcast_to",
"torch.nn.functional.one_hot",
"torch.nn.functional.unfold",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fac2003/perceiver-pytorch | [
"b07d5154c5dee63684c59f57d02a1b405701845f"
] | [
"tests/test_multimodality_with_text_perceiver.py"
] | [
"from torch.nn import Embedding\nimport pytest\n\nfrom fixtures import *\nfrom perceiver_pytorch.modalities import InputModalityWithEmbedding\nfrom perceiver_pytorch.multi_modality_with_text_perceiver import MultiModalityWithTextPerceiver\n\n\ndef test_embedding_for_layer(text_inputs):\n text_modality = InputModalityWithEmbedding(\n name='text',\n input_channels=1, # 1 channel for long ids representing tokens\n input_axis=1, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is\n embedding=Embedding(32000, text_embedding_dim)\n )\n assert text_inputs.size() == (3, 512,1)\n embedded = text_modality.embedding(text_inputs)\n assert embedded.size()==(3, 512,1, 256)\n assert text_modality.embedding_for_layer(embedded=embedded.squeeze(2), layer_index=0, depth=4).size() == (3, 512, 256//4)\n\n\ndef test_multimodality_forward_image_text(image_inputs,\n text_inputs,\n targets):\n image_modality = InputModalityWithEmbedding(\n name='image',\n input_channels=3, # number of channels for each token of the input\n input_axis=2, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=4., # maximum frequency, hyperparameter depending on how fine the data is\n )\n text_modality = InputModalityWithEmbedding(\n name='text',\n input_channels=1, # 1 channel for long ids representing tokens\n input_axis=1, # number of axes, 2 for images\n num_freq_bands=6, # number of freq bands, with original value (2 * K + 1)\n max_freq=8., # maximum frequency, hyperparameter depending on how fine the data is\n embedding=Embedding(32000, text_embedding_dim)\n )\n model = MultiModalityWithTextPerceiver(\n modalities=(image_modality, text_modality),\n depth=depth, # depth of net\n num_latent_blocks_per_layer=2,\n num_latents=12,\n # number of latents, or induced set points, or centroids. different papers giving it different names\n latent_dim=64, # latent dimension\n cross_heads=1, # number of heads for cross attention. paper said 1\n latent_heads=8, # number of heads for latent self attention, 8\n cross_dim_head=64,\n latent_dim_head=64,\n num_classes=num_classes, # output number of classes\n attn_dropout=0.,\n ff_dropout=0.,\n weight_tie_layers=True,\n # whether to weight tie layers (optional, as indicated in the diagram)\n )\n result = model({'image': image_inputs,\n 'text': text_inputs})\n assert result is not None\n"
] | [
[
"torch.nn.Embedding"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nicoroulet/thesis | [
"7b47a67b986a96633e9ee775ae96199a85995e01"
] | [
"src/Tools.py"
] | [
"\"\"\"Collection of Tools.\"\"\"\n\nimport numpy as np\nimport random\nimport os\n\ndef get_label_index(Y, bbox):\n x1, x2, y1, y2, z1, z2 = bbox\n Y_cropped = Y[x1:x2, y1:y2, z1:z2]\n labels = range(int(np.max(Y_cropped)) + 1)\n label_index = {}\n for label in labels:\n label_index[label] = np.argwhere(Y_cropped == label)\n return label_index\n\n\ndef get_voxel_of_rand_label(Y, bbox, label_index, ignore_bg=False):\n \"\"\"Random voxel from the given index, with balanced label probabilities.\n\n Args:\n Y (Numpy array): Image from which to pick the voxel.\n bbox (tuple): bounding box x1, x2, y1, y2, z1, z2 from which to\n sample the voxel.\n\n Returns:\n Numpy array: coordinates of the chosen voxel.\n\n \"\"\"\n labels = range(ignore_bg, int(np.max(Y)) + 1)\n x1, x2, y1, y2, z1, z2 = bbox\n Y_cropped = Y[x1:x2, y1:y2, z1:z2]\n while (True):\n label = np.random.choice(labels)\n try:\n voxel = random.choice(label_index[label])[:-1]\n return voxel + np.array([x1, y1, z1])\n except IndexError:\n pass\n\n\ndef get_bounding_box(X, patch_multiplicity):\n \"\"\"Get the bounding box of an image.\n\n The bounding box is the smallest box that contains all nonzero elements of\n the volume. The multiplicity defined by the generator is enforced by\n enlarging the box if needed.\n\n Args:\n X (numpy array): image volume from which to calculate the box\n patch_multiplicity (int): multiplicity enforced to dimensions of bounding box.\n\n Returns:\n tuple: xmin, xmax, ymin, ymax, zmin, zmax; 3D bounding box\n \"\"\"\n try:\n X = np.squeeze(X, axis=0)\n except ValueError:\n pass # axis 0 is not single-dimensional\n # Clear possible interpolation artifacts around actual brain.\n mask = X != bg_value\n # X = X * np.abs(X) > 0.0001\n out = []\n for ax in ((1, 2), (0, 2), (0, 1)):\n collapsed_mask = np.any(mask, axis=ax)\n\n vmin, vmax = np.where(collapsed_mask)[0][[0, -1]]\n max_size = collapsed_mask.shape[0]\n size = vmax - vmin\n # FIXME: if size % patch_multiplicity == 0, this adds innecesary size.\n new_size = size + (patch_multiplicity - size % patch_multiplicity)\n diff = new_size - size\n # Expand the box to enforce multiplicity, without exceeding the [0, max_size) interval.\n new_vmin = max(0, min(vmin - diff // 2, max_size - new_size))\n new_vmax = min(max_size, new_vmin + new_size)\n out.extend([new_vmin, new_vmax])\n return tuple(out)\n\n\ndef generate_cuboid_centered(cuboid_shape, volume_shape, center_voxel):\n \"\"\"Generate a cuboid to crop a patch, centered on a given voxel.\n\n Args:\n cuboid_shape (iterable): shape of returned cuboid.\n volume_shape (iterable): tuple width, height, depth. Volume that\n contains the returned cuboid.\n center_voxel (iterable): 3D point x, y, z that will be centered in\n the returned cuboid.\n\n Returns:\n tuple: cuboid (x1, x2, y1, y2, z1, z2) that contains `center_voxel`\n and is fully contained by `volume_shape`. The generated cuboid is,\n as much as possible, centered on `center_voxel`.\n\n \"\"\"\n x1, y1, z1 = v = np.minimum(np.maximum(0, np.array(center_voxel) -\n np.array(cuboid_shape, dtype='int') // 2),\n np.array(volume_shape) - cuboid_shape)\n x2, y2, z2 = v + cuboid_shape\n return x1, x2, y1, y2, z1, z2\n\n\ndef generate_cuboid_containing(cuboid_shape, volume_shape, contained_voxel):\n \"\"\"Generate a cuboid to crop a patch, containing a given voxel.\n\n Args:\n cuboid_shape (iterable): shape of returned cuboid.\n volume_shape (iterable): tuple width, height, depth. 
Volume that\n contains the returned cuboid.\n contained_voxel (iterable): 3D point x, y, z that will be contained in\n the returned cuboid.\n\n Returns:\n tuple: cuboid (x1, x2, y1, y2, z1, z2) that contains `contained_voxel`\n and is fully contained by `volume_shape`.\n\n \"\"\"\n cuboid_width, cuboid_height, cuboid_depth = cuboid_shape\n width, height, depth = volume_shape\n vx, vy, vz = contained_voxel\n # Sample each lower corner so that the cuboid both contains the voxel and\n # stays inside the volume.\n x1 = np.random.randint(max(0, vx - cuboid_width),\n min(vx + 1, width - cuboid_width))\n y1 = np.random.randint(max(0, vy - cuboid_height),\n min(vy + 1, height - cuboid_height))\n z1 = np.random.randint(max(0, vz - cuboid_depth),\n min(vz + 1, depth - cuboid_depth))\n x2 = x1 + cuboid_width\n y2 = y1 + cuboid_height\n z2 = z1 + cuboid_depth\n return x1, x2, y1, y2, z1, z2\n\n\ndef filter_modalities(all_modalities, target_modalities, x):\n \"\"\"Filter channels from x based on the given modalities.\n\n Modalities are represented on the last dimension of `x` and are the different types of images\n (t1, t2, flair, etc.). This is used to feed a dataset with extra modalities to a net that has\n been trained on a subset of them.\n\n Args:\n all_modalities (list): modalities of x.\n target_modalities (list): desired modalities.\n x (numpy array): image or batch of images to filter.\n\n Returns:\n numpy array: filtered x\n\n \"\"\"\n # TODO: this is inefficient. Furthermore, it may be unnecessarily recomputed on repeated calls.\n target_indexes = [i for (i, modality) in enumerate(all_modalities)\n if modality in target_modalities]\n\n return x[..., target_indexes]\n\nbg_value = -4\n\n_model_subdir = ''\n\ndef set_model_subdir(subdir):\n global _model_subdir\n _model_subdir = subdir\n\ndef get_dataset_savedir(dataset, loss=None):\n \"\"\"Figure out savedir from a given dataset and loss function.\n\n Args:\n dataset (Dataset): the Dataset.\n loss (string or function, optional): Dataset loss. Default is\n `sparse_categorical_crossentropy`.\n\n \"\"\"\n savedir = '../models/%s/unet_%s' % (_model_subdir, dataset.name)\n if loss is not None and loss != 'sparse_categorical_crossentropy':\n savedir += '_' + (loss if isinstance(loss, str) else loss.__name__)\n return savedir\n\ndef ensure_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n"
] | [
[
"numpy.random.choice",
"numpy.squeeze",
"numpy.argwhere",
"numpy.max",
"numpy.any",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ACTCollaboration/tilec | [
"11ed8d027ad6ffac09b3e291a047f33e97673f14",
"11ed8d027ad6ffac09b3e291a047f33e97673f14",
"11ed8d027ad6ffac09b3e291a047f33e97673f14",
"11ed8d027ad6ffac09b3e291a047f33e97673f14"
] | [
"bin/planck/verify_projection.py",
"bin/test_simple_coadd.py",
"bin/archived/GRFVerify.py",
"tests/test_mix.py"
] | [
"from __future__ import print_function\nfrom orphics import maps,io,cosmology\nfrom pixell import enmap\nimport numpy as np\nimport os,sys\nfrom soapack import interfaces as sints\n\ndef get_coadd(imaps,wts,axis):\n # sum(w*m)/sum(w)\n twt = np.sum(wts,axis=axis)\n retmap = np.sum(wts*imaps,axis=axis)/twt\n retmap[~np.isfinite(retmap)] = 0\n return retmap,twt\n\ndef get_npol(array):\n if array=='545' or array=='857':return 1\n else: return 3\n\nmask = sints.get_act_mr3_crosslinked_mask('deep56',version='180323')\ndm = sints.PlanckHybrid(region=mask)\nbin_edges = np.arange(30,6000,40)\n\np1ds = {}\nfor array in dm.arrays:\n splits = dm.get_splits(array,srcfree=False)[0]\n ivars = dm.get_splits_ivar(array)[0]\n coadd,wt = get_coadd(splits,ivars,axis=0)\n npol = get_npol(array)\n for i in range(npol):\n cents,p1d = maps.binned_power(coadd[i],bin_edges=bin_edges,mask=mask)\n p1ds[array+str(i)] = p1d.copy()\n mivar = wt[i].mean()\n\n print(array,mivar)\n\nfor i in range(3):\n pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='C')\n for array in dm.arrays:\n npol = get_npol(array)\n if i<npol:\n pl.add(cents,p1ds[array+str(i)],label=array)\n pl.done(\"powers%d.png\" % i)\n",
"from __future__ import print_function\nfrom orphics import maps,io,cosmology,stats\nfrom pixell import enmap\nimport numpy as np\nimport os,sys\nfrom tilec import covtools,ilc\nfrom scipy.optimize import curve_fit\n\ndeg = 20.\npx = 2.0\n\ntheory = cosmology.default_theory()\nshape,wcs = maps.rect_geometry(width_deg=deg,px_res_arcmin=px)\nmodlmap = enmap.modlmap(shape,wcs)\nells = np.arange(modlmap.max())\ncltt = theory.lCl('TT',ells)\n\n\nmgen = maps.MapGen(shape,wcs,cltt[None,None])\nnoise = [10,20]\nngen1 = maps.MapGen(shape,wcs,(ells*0 + (noise[0]*np.pi/180./60.)**2.)[None,None])\nngen2 = maps.MapGen(shape,wcs,(ells*0 + (noise[1]*np.pi/180./60.)**2.)[None,None])\n\ncov = enmap.enmap(np.zeros((shape[0],shape[1],2,2)),wcs)\nfor i in range(2):\n for j in range(2):\n cov[...,i,j] = maps.interp(ells,cltt)(modlmap) + int(i==j) * (noise[i]*np.pi/180./60.)**2.\n\ncinv = np.linalg.inv(cov)\nnsims = 30\n\nnp.random.seed(1)\n\nbin_edges = np.arange(80,3000,40)\nbinner = stats.bin2D(modlmap,bin_edges)\n\ns = stats.Stats()\n\n\ngellmax = modlmap.max()\nells = np.arange(0,gellmax,1)\nctheory = ilc.CTheory(ells)\nslmin = 80\nminell = maps.minimum_ell(shape,wcs)\nfitmax = 600\nfitmin = slmin\ndell = 2*minell\nfbin_edges = np.arange(fitmin,fitmax,dell)\nfbinner = stats.bin2D(modlmap,fbin_edges)\nfcents = fbinner.centers\n\nfor i in range(nsims):\n print(i)\n cmb = mgen.get_map(seed=(1,i))\n n1 = ngen1.get_map(seed=(2,i))\n n2 = ngen2.get_map(seed=(3,i))\n \n\n kmap0 = enmap.fft(cmb,normalize='phys')\n kmap1 = enmap.fft(cmb+n1,normalize='phys')\n kmap2 = enmap.fft(cmb+n2,normalize='phys')\n\n kmaps = [kmap1,kmap2]\n icov = np.zeros((shape[0],shape[1],2,2))\n ncov = np.zeros((shape[0],shape[1],2,2))\n lmin = 80\n lmax = 7000\n for p in range(2):\n for q in range(2):\n power = np.real(kmaps[p]*kmaps[q].conj())\n icov[...,p,q] = covtools.signal_average(power,bin_width=80,kind=3,dlspace=True,lmin=lmin)\n #icov[...,p,q] = covtools.signal_average(enmap.enmap(cov[...,p,q],wcs),bin_width=80,kind=3,dlspace=True,lmin=lmin)\n \n ncov[...,p,q] = icov[...,p,q].copy()\n\n np.random.seed((4,i,p,q))\n stoch = (1+np.random.normal(scale=0.01))\n print(100-stoch*100.)\n ncov[...,p,q][modlmap<600] = icov[...,p,q][modlmap<600].copy() * stoch\n #ncov[modlmap<600,p,q] = cov[modlmap<600,p,q].copy()\n\n # f1 = 150 ; f2 = 150\n # ffunc = lambda d,x: fbinner.bin(maps.interp(ells,ctheory.get_theory_cls(f1,f2,a_cmb=x))(modlmap))[1]\n # res,_ = curve_fit(ffunc,fcents,fbinner.bin(power)[1],p0=[1],bounds=([0.2],[1.8])) \n # fcmb = res\n # print(fcmb)\n # cfit = maps.interp(ells,ctheory.get_theory_cls(f1,f2,a_cmb=fcmb))(modlmap)\n # ncov[modlmap<600,p,q] = cfit[modlmap<600].copy()\n\n if p==q: \n icov[modlmap<=lmin,p,q] = cov.max()*10000\n icov[modlmap>=lmax,p,q] = cov.max()*10000\n ncov[modlmap<=lmin,p,q] = cov.max()*10000\n ncov[modlmap>=lmax,p,q] = cov.max()*10000\n #io.power_crop(icov[...,p,q],200,\"dscov_%d_%d.png\" % (p,q))\n #icov[...,p,q] = cov[...,p,q]\n\n icinv = np.linalg.inv(icov)\n ncinv = np.linalg.inv(ncov)\n\n \n \n ks = np.stack([kmap1,kmap2])\n rs = np.ones((2,))\n kcoadd = np.einsum(\"i,...ij,j...->...\",rs,cinv,ks) / np.einsum(\"i,...ij,j->...\",rs,cinv,rs)\n ikcoadd = np.einsum(\"i,...ij,j...->...\",rs,icinv,ks) / np.einsum(\"i,...ij,j->...\",rs,icinv,rs)\n nkcoadd = np.einsum(\"i,...ij,j...->...\",rs,ncinv,ks) / np.einsum(\"i,...ij,j->...\",rs,ncinv,rs)\n \n p2d = np.real(kcoadd*kmap0.conj())\n cents,p1d = binner.bin(p2d)\n s.add_to_stats(\"p1d\",p1d)\n\n p2d = np.real(kmap0*kmap0.conj())\n cents,p1d0 = 
binner.bin(p2d)\n s.add_to_stats(\"p1d0\",p1d0)\n\n p2d = np.real(ikcoadd*kmap0.conj())\n cents,p1d = binner.bin(p2d)\n s.add_to_stats(\"ip1d\",p1d)\n\n p2d = np.real(nkcoadd*kmap0.conj())\n cents,p1d = binner.bin(p2d)\n s.add_to_stats(\"np1d\",p1d)\n\n\n\ns.get_stats()\n\np1d = s.stats['p1d']['mean']\np1d0 = s.stats['p1d0']['mean']\nip1d = s.stats['ip1d']['mean']\nnp1d = s.stats['np1d']['mean']\n\npl = io.Plotter(xyscale='loglog',scalefn = lambda x: x**2./2./np.pi,xlabel='l',ylabel='D')\npl.add(ells,cltt)\npl.add(cents,p1d)\npl.done(\"simpleilc.png\")\n\npl = io.Plotter(xyscale='linlin',xlabel='l',ylabel='D')\npl.add(cents,(p1d-p1d0)/p1d0)\npl.add(cents,(ip1d-p1d0)/p1d0,ls=\"-\")\npl.add(cents,(np1d-p1d0)/p1d0,ls=\"--\")\npl._ax.set_xlim(70,1000)\npl._ax.set_ylim(-0.02,0.02)\npl.hline()\npl.done(\"dsimpleilc.png\")\n\n",
"from __future__ import print_function\nimport matplotlib\nmatplotlib.use('Agg')\nfrom orphics import maps,io,cosmology,stats\nfrom pixell import enmap\nimport numpy as np\nfrom szar import foregrounds as fg\nimport os,sys\n\n\ndeg = 5.\npx = 1.0\nnsims = 20\n\nsave_root = sys.argv[1]\n\n#save_root = \"III\"\n#save_root = \"II\"\n#save_root = \"I\"\n#save_root = \"A\"\n#save_root = \"P\"\n\n\"\"\"\nP 0 to 2000 : 70,100,143,217,353\nI 0 to 2000 : ACT + 70,100,143,217,353\nII 2000 to 3500 : ACT + 100,143,217,353\nIII 3500 to 6000 : ACT + 143,217,353\nIV 6000 to 20000 : ACT\n\"\"\"\n\n\n\nif save_root==\"III\":\n # ACT + Planck III\n freqs = [150,150,150,150,150,90,143,217,353]\n beams = np.array([1.5,1.5,1.5,1.5,1.5,2.3,7.,5.,5.])\n noises = [30.,30.,30.,30.,20.,15.,43.,66.,200.]\n lknees = [3000.]*5 + [1000.] + [0.]*3\n alphas = [-4.]*6 + [1.]*3\n ellmins = [300]*6 + [2]*3\n ellmaxes = [6000]*6 + [6000]*3\n\n\nelif save_root==\"I\":\n # ACT + Planck I\n freqs = [150,150,150,150,150,90,70,100,143,217,353]\n beams = np.array([1.5,1.5,1.5,1.5,1.5,2.3,14.,10.,7.,5.,5.])\n noises = [30.,30.,30.,30.,20.,15.,137.,65.,43.,66.,200.]\n lknees = [3000.]*5 + [1000.] + [0.]*5\n alphas = [-4.]*6 + [1.]*5\n ellmins = [300]*6 + [2]*5\n ellmaxes = [2000]*6 + [2000]*5\n\n\nelif save_root==\"II\":\n # ACT + Planck II\n freqs = [150,150,150,150,150,90,100,143,217,353]\n beams = np.array([1.5,1.5,1.5,1.5,1.5,2.3,10.,7.,5.,5.])\n noises = [30.,30.,30.,30.,20.,15.,65.,43.,66.,200.]\n lknees = [3000.]*5 + [1000.] + [0.]*4\n alphas = [-4.]*6 + [1.]*4\n ellmins = [300]*6 + [2]*4\n ellmaxes = [3500]*6 + [3500]*4\n\nelif save_root==\"A\":\n # ACT\n freqs = [150,150,150,150,150,90]\n beams = np.array([1.5,1.5,1.5,1.5,1.5,2.3])\n noises = [30.,30.,30.,30.,20.,15.]\n lknees = [3000.]*5 + [1000.]\n alphas = [-4.]*6 \n ellmins = [300]*6 \n ellmaxes = [6000]*6 \n\nelif save_root==\"P\":\n # Planck\n freqs = [70,100,143,217,353]\n beams = np.array([14.,10.,7.,5.,5.])\n noises = [137.,65.,43.,66.,200.]\n lknees = [0.]*5\n alphas = [1.]*5\n ellmins = [2]*5\n ellmaxes = [3000]*5\n \nelif save_root==\"lownoise\":\n # lownoise\n freqs = [150,150,150,150,150,90,70,100,143,217,353]\n beams = np.array([1.5,1.5,1.5,1.5,1.5,1,1.,1.,1.,1.,1.])\n noises = np.array([1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.])*0\n lknees = [0.]*5 + [0.] 
+ [0.]*5\n alphas = [1.]*6 + [1.]*5\n ellmins = [2]*6 + [2]*5\n ellmaxes = [20000]*6 + [20000]*5\n \nelse:\n raise ValueError\n\n\nclass ArrayGen(object):\n def __init__(self,shape,wcs,theory,freqs,beams,noises,lknees,alphas,ellmins,ellmaxes):\n\n fgn = fg.fgNoises(cosmology.defaultConstants,ksz_file='/home/msyriac/repos/szar/input/ksz_BBPS.txt',\n ksz_p_file='/home/msyriac/repos/szar/input/ksz_p_BBPS.txt',\n tsz_cib_file='/home/msyriac/repos/szar/input/sz_x_cib_template.txt',\n ksz_battaglia_test_csv=None,\n tsz_battaglia_template_csv=\"/home/msyriac/repos/szar/input/sz_template_battaglia.csv\",\n rs_template=\"/home/msyriac/repos/szar/input/fiducial_scalCls_lensed_5_5.txt\",\n rsx_template=\"/home/msyriac/repos/szar/input/fiducial_scalCls_lensed_1_5.txt\",\n components=['tsz','cibp','cibc','radps'],lmax=20000)\n \n self.modlmap = enmap.modlmap(shape,wcs)\n modlmap = self.modlmap\n self.fgn = fgn\n lmax = self.modlmap.max()\n ells = np.arange(0,lmax,1)\n ps_cmb = theory.lCl('TT',modlmap).reshape((1,1,shape[-2],shape[-1]))\n self.ps_cmb = ps_cmb\n ps_y = fgn.tsz_template(ells).reshape((1,1,ells.size))*self.fgn.c['A_tsz']*2.*np.pi*np.nan_to_num(1./ells/(ells+1.))\n ps_cibp = (fgn.c['A_cibp'] * ((ells/fgn.c['ell0sec'])) ** 2.0 *2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))\n ps_cibc = (fgn.c['A_cibc'] * ((ells/fgn.c['ell0sec'])) ** (2.-fgn.c['n_cib']) * 2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))\n ps_radps = (fgn.c['A_ps'] * ((ells/fgn.c['ell0sec'])) ** 2 * 2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))\n self.cgen = maps.MapGen(shape[-2:],wcs,ps_cmb)\n self.tgen = maps.MapGen(shape[-2:],wcs,ps_y)\n self.cibpgen = maps.MapGen(shape[-2:],wcs,ps_cibp)\n self.cibcgen = maps.MapGen(shape[-2:],wcs,ps_cibc)\n self.radpsgen = maps.MapGen(shape[-2:],wcs,ps_radps)\n self.shape = shape ; self.wcs = wcs\n self.freqs = freqs\n self.kbeams = []\n self.ngens = []\n self.n2ds = []\n for ai,nu in enumerate(self.freqs):\n self.kbeams.append(maps.gauss_beam(fwhm=beams[ai],ell=self.modlmap))\n n2d = cosmology.noise_func(self.modlmap,0,noises[ai],lknee=lknees[ai],alpha=alphas[ai],dimensionless=False,TCMB=2.7255e6)\n n2d[modlmap<ellmins[ai]] = 0\n n2d[modlmap>ellmaxes[ai]] = 0\n n2dmod = n2d.copy()\n n2dmod[modlmap>ellmaxes[ai]] = 1e90\n n2dmod[modlmap<ellmins[ai]] = 1e90\n self.n2ds.append(n2dmod.copy())\n ps_noise = n2d.reshape((1,1,shape[-2],shape[-1]))\n self.ngens.append(maps.MapGen(shape[-2:],wcs,ps_noise))\n self.ellmins = ellmins\n self.ellmaxes = ellmaxes\n \n def get_maps(self,seed=None):\n cmb = self.cgen.get_map(seed=(1,seed) if seed is not None else None)\n y = self.tgen.get_map(seed=(2,seed) if seed is not None else None)\n cibp = self.cibpgen.get_map(seed=(4,seed) if seed is not None else None)\n cibc = self.cibcgen.get_map(seed=(5,seed) if seed is not None else None)\n radps = self.radpsgen.get_map(seed=(6,seed) if seed is not None else None)\n observed = []\n for ai,nu in enumerate(self.freqs):\n tsz = self.fgn.tSZ_nu(nu) * y\n tsz += (self.fgn.cib_nu(nu) * cibp + self.fgn.cib_nu(nu) * cibc + self.fgn.rad_ps_nu(nu) * radps )\n observed.append(self._filter(self._beam(cmb+tsz,ai)+self._noise(ai,seed),ai))\n observed = np.stack(observed)\n return cmb,y,observed\n def _beam(self,imap,ai):\n return maps.filter_map(imap,self.kbeams[ai])\n def _noise(self,ai,seed=None):\n return self.ngens[ai].get_map(seed=(3,seed) if seed is not None else None)\n def _filter(self,imap,ai):\n kmask = 
maps.mask_kspace(self.shape,self.wcs,lmin=self.ellmins[ai],lmax=self.ellmaxes[ai])\n return maps.filter_map(imap,kmask)\n \n def get_cov(self):\n pass\n\nshape,wcs = maps.rect_geometry(width_deg=deg,px_res_arcmin=px)\ntheory = cosmology.default_theory()\n\n\n\n\nassert len(freqs)==len(beams)==len(noises)==len(lknees)==len(alphas)\npnum = 0\nagen = ArrayGen(shape,wcs,theory,freqs[pnum:],beams[pnum:],noises[pnum:],lknees[pnum:],alphas[pnum:],ellmins[pnum:],ellmaxes[pnum:])\ncmb,y,observed = agen.get_maps()\n\n\ncinv,cov = maps.ilc_cinv(agen.modlmap,agen.ps_cmb[0,0],agen.kbeams,agen.freqs,agen.n2ds,['tsz','cibp','cibc','radps'],agen.fgn,plot=False,plot_save=None,ellmaxes=ellmaxes,eigpow=True)\n\nfor ai in range(len(freqs)):\n for aj in range(len(freqs)):\n cinv[ai,aj][agen.modlmap<ellmins[ai]] = 0\n cinv[ai,aj][agen.modlmap<ellmins[aj]] = 0\n cinv[ai,aj][agen.modlmap>ellmaxes[ai]] = 0\n cinv[ai,aj][agen.modlmap>ellmaxes[aj]] = 0\n\n\n# Set up SZ frequency dependence\ndef gnu(nu_ghz,tcmb=2.7255):\n nu = 1e9*np.asarray(nu_ghz)\n hplanck = 6.62607e-34\n kboltzmann = 1.38065e-23 \n x = hplanck*nu/kboltzmann/tcmb\n coth = np.cosh(x/2.)/np.sinh(x/2.)\n return x*coth-4.\n\nyresponses = gnu(freqs)\ncresponses = yresponses*0 + 1.\nfc = maps.FourierCalc(shape[-2:],wcs)\ns = stats.Stats()\nbin_edges = np.arange(300,5000,80)\nbinner = stats.bin2D(agen.modlmap,bin_edges)\n\nfor i in range(nsims):\n cmb,y,observed = agen.get_maps()\n kmaps = []\n for j in range(len(freqs)):\n _,kmap,_ = fc.power2d(observed[j])\n km = np.nan_to_num(kmap/agen.kbeams[j])\n km[agen.modlmap>ellmaxes[j]] = 0\n km[agen.modlmap<ellmins[j]] = 0\n kmaps.append(km.copy()) \n kmaps = np.stack(kmaps)\n sc = maps.silc(kmaps,cinv,cresponses)\n sy = maps.silc(kmaps,cinv,yresponses)\n cc = maps.cilc(kmaps,cinv,cresponses,yresponses)\n cy = maps.cilc(kmaps,cinv,yresponses,cresponses)\n\n pcmb,kcmb,_ = fc.power2d(cmb,cmb)\n py,ky,_ = fc.power2d(y,y)\n psc_cmb = fc.f2power(sc,kcmb)\n pcc_cmb = fc.f2power(cc,kcmb)\n psy_y = fc.f2power(sy,ky)\n pcy_y = fc.f2power(cy,ky)\n psc = fc.f2power(sc,sc)\n pcc = fc.f2power(cc,cc)\n psy = fc.f2power(sy,sy)\n pcy = fc.f2power(cy,cy)\n\n cents,cl_cmb = binner.bin(pcmb)\n cents,cl_y = binner.bin(py)\n cents,cl_sc_cmb = binner.bin(psc_cmb)\n cents,cl_cc_cmb = binner.bin(pcc_cmb)\n cents,cl_sy_y = binner.bin(psy_y)\n cents,cl_cy_y = binner.bin(pcy_y)\n cents,cl_sc = binner.bin(psc)\n cents,cl_cc = binner.bin(pcc)\n cents,cl_sy = binner.bin(psy)\n cents,cl_cy = binner.bin(pcy)\n \n s.add_to_stats(\"cmb\",cl_cmb)\n s.add_to_stats(\"y\",cl_y)\n s.add_to_stats(\"scxcmb\",cl_sc_cmb)\n s.add_to_stats(\"ccxcmb\",cl_cc_cmb)\n s.add_to_stats(\"syxy\",cl_sy_y)\n s.add_to_stats(\"cyxy\",cl_cy_y)\n s.add_to_stats(\"sc\",cl_sc)\n s.add_to_stats(\"cc\",cl_cc)\n s.add_to_stats(\"sy\",cl_sy)\n s.add_to_stats(\"cy\",cl_cy)\n\n print(i)\ns.get_stats()\n\n\n\ncmb = s.stats['cmb']['mean']\ny = s.stats['y']['mean']\nscxcmb = s.stats['scxcmb']['mean']\nccxcmb = s.stats['ccxcmb']['mean']\nsyxy = -s.stats['syxy']['mean']\ncyxy = -s.stats['cyxy']['mean']\nescxcmb = s.stats['scxcmb']['err']\neccxcmb = s.stats['ccxcmb']['err']\nesyxy = s.stats['syxy']['err']\necyxy = s.stats['cyxy']['err']\nsc = s.stats['sc']['mean']\ncc = s.stats['cc']['mean']\nsy = s.stats['sy']['mean']\ncy = s.stats['cy']['mean']\n\nsc_noise = binner.bin(maps.silc_noise(cinv,cresponses))[1]\nsy_noise = binner.bin(maps.silc_noise(cinv,yresponses))[1]\ncc_noise = binner.bin(maps.cilc_noise(cinv,cresponses,yresponses))[1]\ncy_noise = 
binner.bin(maps.cilc_noise(cinv,yresponses,cresponses))[1]\nsn = maps.silc_noise(cinv,cresponses)\nsn[sn<-1e30] = -1e30\nsn[sn>1e30] = 1e30\n#print(sn.min(),sn.max())\n#io.plot_img(np.log10(np.fft.fftshift(sn)))\n\n\ntt = binner.bin(agen.ps_cmb)[1]\n\nlss,snls = np.loadtxt(\"/home/msyriac/repos/halofg/data/smica_nls.txt\",unpack=True)\nlsl,lnls = np.loadtxt(\"/home/msyriac/repos/halofg/data/lgmca_nls.txt\",unpack=True)\nsnls = snls[lss<3000]\nlss = lss[lss<3000]\nlnls = lnls[lsl<3000]\nlsl = lsl[lsl<3000]\n\n\npl = io.Plotter(yscale='log',xlabel='l',ylabel='D')\npl.add(cents,cmb*cents**2.,lw=2,color='k')\npl.add_err(cents,scxcmb*cents**2.,yerr=escxcmb*cents**2,lw=1,label=\"ilc\",marker=\"o\",color=\"C0\")\npl.add_err(cents,ccxcmb*cents**2.,yerr=eccxcmb*cents**2,lw=1,label=\"cilc\",marker=\"o\",color=\"C1\")\npl.add(cents,sc*cents**2.,lw=1,ls=\"--\",color=\"C0\")\npl.add(cents,cc*cents**2.,lw=1,ls=\"--\",color=\"C1\")\npl.add(cents,(sc_noise-tt)*cents**2.,lw=1,ls=\"-.\",color=\"C0\")\npl.add(cents,(cc_noise-tt)*cents**2.,lw=1,ls=\"-.\",color=\"C1\")\npl.add(lss,(snls)*lss**2./maps.gauss_beam(lss,5.)**2.,lw=1,ls=\"-.\",color=\"C2\",label='smica',alpha=0.5)\npl.add(lsl,(lnls)*lsl**2./maps.gauss_beam(lss,5.)**2.,lw=1,ls=\"-.\",color=\"C3\",label='lgmca',alpha=0.5)\npl._ax.set_ylim(1e1,3e4)\npl.legend(loc='lower center')\npl.done(save_root+\"cmb.png\")\n\n\npl = io.Plotter(yscale='log',xlabel='l',ylabel='D')\npl.add(cents,y*cents**2.,lw=2,color='k')\npl.add_err(cents,syxy*cents**2.,yerr=esyxy*cents**2,lw=1,label=\"ilc\",marker=\"o\",color=\"C0\")\npl.add_err(cents,cyxy*cents**2.,yerr=ecyxy*cents**2,lw=1,label=\"cilc\",marker=\"o\",color=\"C1\")\npl.add(cents,sy*cents**2.,lw=1,ls=\"--\",color=\"C0\")\npl.add(cents,cy*cents**2.,lw=1,ls=\"--\",color=\"C1\")\npl.add(cents,sy_noise*cents**2.,lw=1,ls=\"-.\",color=\"C0\")\npl.add(cents,cy_noise*cents**2.,lw=1,ls=\"-.\",color=\"C1\")\npl._ax.set_ylim(2e0,2e4)\npl.done(save_root+\"y.png\")\n\n\n\n",
"from tilec import fg as tfg\nimport numpy as np\nimport glob\nimport pickle\nimport os\ndirname = os.path.dirname(os.path.abspath(__file__))\n\nversion = dirname + \"/data/MM_20200307\"\n\n\ndef load_pickle(pickle_file):\n try:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n except UnicodeDecodeError as e:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f, encoding='latin1')\n except Exception as e:\n print('Unable to load data ', pickle_file, ':', e)\n raise\n return pickle_data\n\n\ndef test_fg_mix():\n fdict = tfg.get_test_fdict()\n fdict0 = load_pickle(\"%s_fdict.pkl\" % version)\n for key in fdict0['mix0'].keys():\n #print(\"mix0\",key)\n assert np.all(np.isclose(fdict0['mix0'][key],fdict['mix0'][key]))\n\n for comp in fdict0['mix1'].keys():\n for key in fdict0['mix1'][comp].keys():\n #print(\"mix1\",comp,key)\n #print(fdict0['mix1'][comp][key],fdict['mix1'][comp][key])\n assert np.isclose(fdict0['mix1'][comp][key],fdict['mix1'][comp][key])\n\n\ndef test_conversions():\n assert np.isclose(tfg.ItoDeltaT(545)/1e26,0.017483363768883677)\n for f in np.geomspace(1,1000,1000):\n assert np.isclose(tfg.ItoDeltaT(f),1/tfg.dBnudT(f))\n \n"
] | [
[
"numpy.arange",
"numpy.sum",
"numpy.isfinite"
],
[
"numpy.random.seed",
"numpy.einsum",
"numpy.linalg.inv",
"numpy.arange",
"numpy.stack",
"numpy.ones",
"numpy.random.normal",
"numpy.zeros"
],
[
"numpy.cosh",
"numpy.asarray",
"matplotlib.use",
"numpy.arange",
"numpy.nan_to_num",
"numpy.stack",
"numpy.sinh",
"numpy.array",
"numpy.loadtxt"
],
[
"numpy.geomspace",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuxuibbs/MCC-Competition-Docs | [
"384726c41434c5a07becb6438c3d2409c6ca6eb4"
] | [
"website/test.py"
] | [
"import pandas as pd\nimport numpy as np\nimport jellyfish\n\ndef create_heats(df, event, num_heats):\n counter = 0\n for row_num, registration_status in enumerate(df[event]):\n if registration_status != '0':\n df.loc[row_num, event] = counter % num_heats + 1\n counter += 1\n\n\nallEventsDict = {\"222\" : \"2x2 Cube\",\n \"333\" : \"Rubik's Cube\",\n \"333oh\" : \"Rubik's Cube: One-Handed\",\n \"333bf\" : \"Rubik's Cube: Blindfolded\",\n \"333fm\" : \"Rubik's Cube: Fewest moves\",\n \"333ft\" : \"Rubik's Cube: With feet\",\n \"333mbf\": \"Rubik's Cube: Multiple Blindfolded\",\n \"444\" : \"4x4 Cube\",\n \"444bf\" : \"4x4 Cube: Blindfolded\",\n \"555\" : \"5x5 Cube\",\n \"555bf\" : \"5x5 Cube: Blindfolded\",\n \"666\" : \"6x6 Cube\",\n \"777\" : \"7x7 Cube\",\n \"clock\" : \"Rubik's Clock\",\n \"minx\" : \"Megaminx\",\n \"pyram\" : \"Pyraminx\",\n \"skewb\" : \"Skewb\",\n \"sq1\" : \"Square-1\"}\n\n\ninput_file = '/home/yuxuan/CubeToaster/Heats/ImaginationStation.csv'\n\nnum_heats = {'222' : 4,\n '333' : 8,\n '333oh' : 2,\n '555' : 3,\n '666' : 2,\n 'minx' : 2\n }\n\ncomp_events = []\n\ndf = pd.read_csv(input_file, dtype=str, sep=',').drop(['Status', 'Country', 'Birth Date', 'Gender', 'Email', 'Guests', 'IP'], axis=1)\n\n\n# df = df.replace('0', np.NaN)\ndf['staff'] = 0\n\nfor event in allEventsDict:\n if event in df:\n comp_events.append(event)\n create_heats(df, event, num_heats[event])\n\ndf['FirstName'] = (df['Name'].str.split(expand=True)[0])\ndf['MRA'] = df['FirstName'].apply(jellyfish.match_rating_codex)\n\nprint(df.head(50))\n\nfor event in comp_events:\n grouped_df = df.groupby(event)\n for key, item in grouped_df:\n if key != '0':\n print(key)\n print(grouped_df.get_group(key)[['Name', event, 'MRA']].sort_values(by='MRA'))\n print()\n print()\n\ndf.to_csv('test.csv')"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hfurkanbozkurt/ludwig | [
"bfcbd52237c73702764e733ede4351e0146394bd"
] | [
"ludwig/features/category_feature.py"
] | [
"#! /usr/bin/env python\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nfrom typing import Any, Dict, List, Union\n\nimport numpy as np\nimport torch\n\nfrom ludwig.constants import (\n ACCURACY,\n CATEGORY,\n COLUMN,\n FILL_WITH_CONST,\n HIDDEN,\n HITS_AT_K,\n LOGITS,\n LOSS,\n MISSING_VALUE_STRATEGY_OPTIONS,\n NAME,\n PREDICTIONS,\n PROBABILITIES,\n PROBABILITY,\n PROC_COLUMN,\n PROJECTION_INPUT,\n SOFTMAX_CROSS_ENTROPY,\n SUM,\n TIED,\n TYPE,\n)\nfrom ludwig.features.base_feature import BaseFeatureMixin, InputFeature, OutputFeature, PredictModule\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.eval_utils import ConfusionMatrix\nfrom ludwig.utils.math_utils import int_type, softmax\nfrom ludwig.utils.misc_utils import set_default_value, set_default_values\nfrom ludwig.utils.strings_utils import create_vocabulary, UNKNOWN_SYMBOL\n\nlogger = logging.getLogger(__name__)\n\n\nclass _CategoryPreprocessing(torch.nn.Module):\n def __init__(self, metadata: Dict[str, Any]):\n super().__init__()\n self.str2idx = metadata[\"str2idx\"]\n self.unk = self.str2idx[UNKNOWN_SYMBOL]\n\n def forward(self, v: Union[List[str], torch.Tensor]):\n if isinstance(v, torch.Tensor):\n raise ValueError(f\"Unsupported input: {v}\")\n indices = [self.str2idx.get(s.strip(), self.unk) for s in v]\n return torch.tensor(indices, dtype=torch.int32)\n\n\nclass _CategoryPostprocessing(torch.nn.Module):\n def __init__(self, metadata: Dict[str, Any]):\n super().__init__()\n self.idx2str = {i: v for i, v in enumerate(metadata[\"idx2str\"])}\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.unk = \"\"\n\n def forward(self, preds: Dict[str, torch.Tensor]) -> Dict[str, Any]:\n predictions = preds[self.predictions_key]\n inv_preds = [self.idx2str.get(pred, self.unk) for pred in predictions]\n return {\n self.predictions_key: inv_preds,\n self.probabilities_key: preds[self.probabilities_key],\n }\n\n\nclass _CategoryPredict(PredictModule):\n def forward(self, inputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, self.logits_key)\n probabilities = torch.softmax(logits, -1)\n predictions = torch.argmax(logits, -1)\n predictions = predictions.long()\n\n # EXPECTED SHAPE OF RETURNED TENSORS\n # predictions: [batch_size]\n # probabilities: [batch_size, num_classes]\n # logits: [batch_size, num_classes]\n return {self.predictions_key: predictions, self.probabilities_key: probabilities, self.logits_key: logits}\n\n\nclass CategoryFeatureMixin(BaseFeatureMixin):\n @staticmethod\n def type():\n return CATEGORY\n\n @staticmethod\n def preprocessing_defaults():\n return {\n \"most_common\": 10000,\n \"lowercase\": False,\n \"missing_value_strategy\": FILL_WITH_CONST,\n \"fill_value\": UNKNOWN_SYMBOL,\n }\n\n @staticmethod\n def 
preprocessing_schema():\n return {\n \"most_common\": {\"type\": \"integer\", \"minimum\": 0},\n \"lowercase\": {\"type\": \"boolean\"},\n \"missing_value_strategy\": {\"type\": \"string\", \"enum\": MISSING_VALUE_STRATEGY_OPTIONS},\n \"fill_value\": {\"type\": \"string\"},\n \"computed_fill_value\": {\"type\": \"string\"},\n }\n\n @staticmethod\n def cast_column(column, backend):\n return column\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters, backend):\n column = column.astype(str)\n idx2str, str2idx, str2freq, _, _, _, _ = create_vocabulary(\n column,\n \"stripped\",\n num_most_frequent=preprocessing_parameters[\"most_common\"],\n lowercase=preprocessing_parameters[\"lowercase\"],\n add_special_symbols=False,\n processor=backend.df_engine,\n )\n return {\"idx2str\": idx2str, \"str2idx\": str2idx, \"str2freq\": str2freq, \"vocab_size\": len(str2idx)}\n\n @staticmethod\n def feature_data(column, metadata):\n return column.map(\n lambda x: (\n metadata[\"str2idx\"][x.strip()]\n if x.strip() in metadata[\"str2idx\"]\n else metadata[\"str2idx\"][UNKNOWN_SYMBOL]\n )\n ).astype(int_type(metadata[\"vocab_size\"]))\n\n @staticmethod\n def add_feature_data(\n feature_config, input_df, proc_df, metadata, preprocessing_parameters, backend, skip_save_processed_input\n ):\n proc_df[feature_config[PROC_COLUMN]] = CategoryFeatureMixin.feature_data(\n input_df[feature_config[COLUMN]].astype(str),\n metadata[feature_config[NAME]],\n )\n\n return proc_df\n\n\nclass CategoryInputFeature(CategoryFeatureMixin, InputFeature):\n encoder = \"dense\"\n\n def __init__(self, feature, encoder_obj=None):\n super().__init__(feature)\n self.overwrite_defaults(feature)\n if encoder_obj:\n self.encoder_obj = encoder_obj\n else:\n self.encoder_obj = self.initialize_encoder(feature)\n\n def forward(self, inputs):\n assert isinstance(inputs, torch.Tensor)\n assert (\n inputs.dtype == torch.int8\n or inputs.dtype == torch.int16\n or inputs.dtype == torch.int32\n or inputs.dtype == torch.int64\n )\n assert len(inputs.shape) == 1 or (len(inputs.shape) == 2 and inputs.shape[1] == 1)\n\n if len(inputs.shape) == 1:\n inputs = inputs.unsqueeze(dim=1)\n\n if inputs.dtype == torch.int8 or inputs.dtype == torch.int16:\n inputs = inputs.type(torch.int)\n encoder_output = self.encoder_obj(inputs)\n\n return {\"encoder_output\": encoder_output}\n\n @property\n def input_dtype(self):\n return torch.int32\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([1])\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size(self.encoder_obj.output_shape)\n\n @staticmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n input_feature[\"vocab\"] = feature_metadata[\"idx2str\"]\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, TIED, None)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n return _CategoryPreprocessing(metadata)\n\n\nclass CategoryOutputFeature(CategoryFeatureMixin, OutputFeature):\n decoder = \"classifier\"\n loss = {TYPE: SOFTMAX_CROSS_ENTROPY}\n metric_functions = {LOSS: None, ACCURACY: None, HITS_AT_K: None}\n default_validation_metric = ACCURACY\n num_classes = 0\n top_k = 3\n\n def __init__(self, feature, output_features: Dict[str, OutputFeature]):\n super().__init__(feature, output_features)\n self.overwrite_defaults(feature)\n self.decoder_obj = self.initialize_decoder(feature)\n self._setup_loss()\n self._setup_metrics()\n\n def 
logits(self, inputs, **kwargs): # hidden\n hidden = inputs[HIDDEN]\n\n # EXPECTED SHAPES FOR RETURNED TENSORS\n # logits: shape [batch_size, num_classes]\n # hidden: shape [batch_size, size of final fully connected layer]\n return {LOGITS: self.decoder_obj(hidden), PROJECTION_INPUT: hidden}\n\n def create_predict_module(self) -> PredictModule:\n return _CategoryPredict()\n\n def get_prediction_set(self):\n return {PREDICTIONS, PROBABILITIES, LOGITS}\n\n @property\n def input_shape(self) -> torch.Size:\n return torch.Size([self.input_size])\n\n @classmethod\n def get_output_dtype(cls):\n return torch.int64\n\n @property\n def output_shape(self) -> torch.Size:\n return torch.Size([1])\n\n def metric_kwargs(self):\n return dict(top_k=self.top_k)\n\n @staticmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n output_feature[\"num_classes\"] = feature_metadata[\"vocab_size\"]\n output_feature[\"top_k\"] = min(output_feature[\"num_classes\"], output_feature[\"top_k\"])\n\n if isinstance(output_feature[LOSS][\"class_weights\"], (list, tuple)):\n if len(output_feature[LOSS][\"class_weights\"]) != output_feature[\"num_classes\"]:\n raise ValueError(\n \"The length of class_weights ({}) is not compatible with \"\n \"the number of classes ({}) for feature {}. \"\n \"Check the metadata JSON file to see the classes \"\n \"and their order and consider there needs to be a weight \"\n \"for the <UNK> class too.\".format(\n len(output_feature[LOSS][\"class_weights\"]),\n output_feature[\"num_classes\"],\n output_feature[COLUMN],\n )\n )\n\n if isinstance(output_feature[LOSS][\"class_weights\"], dict):\n if feature_metadata[\"str2idx\"].keys() != output_feature[LOSS][\"class_weights\"].keys():\n raise ValueError(\n \"The class_weights keys ({}) are not compatible with \"\n \"the classes ({}) of feature {}. \"\n \"Check the metadata JSON file to see the classes \"\n \"and consider there needs to be a weight \"\n \"for the <UNK> class too.\".format(\n output_feature[LOSS][\"class_weights\"].keys(),\n feature_metadata[\"str2idx\"].keys(),\n output_feature[COLUMN],\n )\n )\n else:\n class_weights = output_feature[LOSS][\"class_weights\"]\n idx2str = feature_metadata[\"idx2str\"]\n class_weights_list = [class_weights[s] for s in idx2str]\n output_feature[LOSS][\"class_weights\"] = class_weights_list\n\n if output_feature[LOSS][\"class_similarities_temperature\"] > 0:\n if \"class_similarities\" in output_feature[LOSS]:\n similarities = output_feature[LOSS][\"class_similarities\"]\n temperature = output_feature[LOSS][\"class_similarities_temperature\"]\n\n curr_row = 0\n first_row_length = 0\n is_first_row = True\n for row in similarities:\n if is_first_row:\n first_row_length = len(row)\n is_first_row = False\n curr_row += 1\n else:\n curr_row_length = len(row)\n if curr_row_length != first_row_length:\n raise ValueError(\n \"The length of row {} of the class_similarities \"\n \"of {} is {}, different from the length of \"\n \"the first row {}. 
All rows must have \"\n \"the same length.\".format(\n curr_row, output_feature[COLUMN], curr_row_length, first_row_length\n )\n )\n else:\n curr_row += 1\n all_rows_length = first_row_length\n\n if all_rows_length != len(similarities):\n raise ValueError(\n \"The class_similarities matrix of {} has \"\n \"{} rows and {} columns, \"\n \"their number must be identical.\".format(\n output_feature[COLUMN], len(similarities), all_rows_length\n )\n )\n\n if all_rows_length != output_feature[\"num_classes\"]:\n raise ValueError(\n \"The size of the class_similarities matrix of {} is \"\n \"{}, different from the number of classes ({}). \"\n \"Check the metadata JSON file to see the classes \"\n \"and their order and \"\n \"consider <UNK> class too.\".format(\n output_feature[COLUMN], all_rows_length, output_feature[\"num_classes\"]\n )\n )\n\n similarities = np.array(similarities, dtype=np.float32)\n for i in range(len(similarities)):\n similarities[i, :] = softmax(similarities[i, :], temperature=temperature)\n\n output_feature[LOSS][\"class_similarities\"] = similarities\n else:\n raise ValueError(\n \"class_similarities_temperature > 0, \"\n \"but no class_similarities are provided \"\n \"for feature {}\".format(output_feature[COLUMN])\n )\n\n @staticmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n overall_stats = {}\n confusion_matrix = ConfusionMatrix(targets, predictions[PREDICTIONS], labels=train_set_metadata[\"idx2str\"])\n overall_stats[\"confusion_matrix\"] = confusion_matrix.cm.tolist()\n overall_stats[\"overall_stats\"] = confusion_matrix.stats()\n overall_stats[\"per_class_stats\"] = confusion_matrix.per_class_stats()\n\n return overall_stats\n\n def postprocess_predictions(\n self,\n predictions,\n metadata,\n output_directory,\n backend,\n ):\n predictions_col = f\"{self.feature_name}_{PREDICTIONS}\"\n if predictions_col in predictions:\n if \"idx2str\" in metadata:\n predictions[predictions_col] = backend.df_engine.map_objects(\n predictions[predictions_col], lambda pred: metadata[\"idx2str\"][pred]\n )\n\n probabilities_col = f\"{self.feature_name}_{PROBABILITIES}\"\n if probabilities_col in predictions:\n prob_col = f\"{self.feature_name}_{PROBABILITY}\"\n predictions[prob_col] = predictions[probabilities_col].map(max)\n predictions[probabilities_col] = backend.df_engine.map_objects(\n predictions[probabilities_col], lambda pred: pred.tolist()\n )\n if \"idx2str\" in metadata:\n for i, label in enumerate(metadata[\"idx2str\"]):\n key = f\"{probabilities_col}_{label}\"\n\n # Use default param to force a capture before the loop completes, see:\n # https://stackoverflow.com/questions/2295290/what-do-lambda-function-closures-capture\n predictions[key] = backend.df_engine.map_objects(\n predictions[probabilities_col],\n lambda prob, i=i: prob[i],\n )\n\n top_k_col = f\"{self.feature_name}_predictions_top_k\"\n if top_k_col in predictions:\n if \"idx2str\" in metadata:\n predictions[top_k_col] = backend.df_engine.map_objects(\n predictions[top_k_col], lambda pred_top_k: [metadata[\"idx2str\"][pred] for pred in pred_top_k]\n )\n\n return predictions\n\n @staticmethod\n def populate_defaults(output_feature):\n # If Loss is not defined, set an empty dictionary\n set_default_value(output_feature, LOSS, {})\n\n # Populate the default values for LOSS if they aren't defined already\n set_default_values(\n output_feature[LOSS],\n {\n TYPE: \"softmax_cross_entropy\",\n \"labels_smoothing\": 0,\n \"class_weights\": 1,\n \"robust_lambda\": 0,\n 
\"confidence_penalty\": 0,\n \"class_similarities_temperature\": 0,\n \"weight\": 1,\n },\n )\n\n set_default_values(\n output_feature, {\"top_k\": 3, \"dependencies\": [], \"reduce_input\": SUM, \"reduce_dependencies\": SUM}\n )\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n return _CategoryPostprocessing(metadata)\n"
] | [
[
"torch.Size",
"torch.softmax",
"torch.tensor",
"numpy.array",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZongSingHuang/Elite-Opposition-Based-Golden-Sine-Whale-Optimization-Algorithm | [
"468b89aaa9cae46b87ce9595cd76b5f97f6c8553"
] | [
"EGolden_SWOA.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 27 15:45:56 2020\n\n@author: ZongSing_NB\n\nMain reference:\nhttp://www.ejournal.org.cn/EN/10.3969/j.issn.0372-2112.2019.10.020\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass EGolden_SWOA():\n def __init__(self, fitness, D=30, P=20, G=500, ub=1, lb=0,\n b=1, a_max=2, a_min=0, a2_max=-1, a2_min=-2, l_max=1, l_min=-1):\n self.fitness = fitness\n self.D = D\n self.P = P\n self.G = G\n self.ub = ub\n self.lb = lb\n self.a_max = a_max\n self.a_min = a_min\n self.a2_max = a2_max\n self.a2_min = a2_min\n self.l_max = l_max\n self.l_min = l_min\n self.b = b\n \n self.gbest_X = np.zeros([self.D])\n self.gbest_F = np.inf\n self.loss_curve = np.zeros(self.G)\n \n def opt(self):\n # 初始化\n self.X = np.random.uniform(low=self.lb, high=self.ub, size=[self.P, self.D])\n tao = (np.sqrt(5)-1)/2\n x1 = -np.pi+(1-tao)\n x2 = -np.pi+tao*2*np.pi\n \n # 迭代\n for g in range(self.G):\n # OBL\n self.X, F = self.OBL()\n \n # 更新最佳解\n if np.min(F) < self.gbest_F:\n idx = F.argmin()\n self.gbest_X = self.X[idx].copy()\n self.gbest_F = F.min()\n \n # 收斂曲線\n self.loss_curve[g] = self.gbest_F\n \n # 更新\n a = self.a_max - (self.a_max-self.a_min)*(g/self.G)\n \n for i in range(self.P):\n p = np.random.uniform()\n r1 = np.random.uniform()\n r2 = np.random.uniform()\n A = 2*a*r1 - a\n C = 2*r2\n \n if np.abs(A)>=1:\n X_rand = self.X[np.random.randint(low=0, high=self.P, size=self.D), :]\n X_rand = np.diag(X_rand).copy()\n D = np.abs(C*X_rand - self.X[i, :])\n self.X[i, :] = X_rand - A*D # (4)\n else:\n if p<0.5:\n D = np.abs(C*self.gbest_X - self.X[i, :])\n self.X[i, :] = self.gbest_X - A*D # (1)\n else:\n r3 = 2*np.pi*np.random.uniform()\n r4 = np.pi*np.random.uniform()\n self.X[i, :] = self.X[i, :]*np.abs(np.sin(r3)) + \\\n r4*np.sin(r3)*np.abs(x1*self.gbest_X-x2*self.X[i, :]) # (9)\n \n # 邊界處理\n self.X = np.clip(self.X, self.lb, self.ub)\n \n \n def plot_curve(self):\n plt.figure()\n plt.title('loss curve ['+str(round(self.loss_curve[-1], 3))+']')\n plt.plot(self.loss_curve, label='loss')\n plt.grid()\n plt.legend()\n plt.show()\n\n def OBL(self):\n # 產生反向解\n k = np.random.uniform()\n alpha = self.X.min(axis=0)\n beta = self.X.max(axis=0)\n obl_X = k*(alpha+beta) - self.X # (5)\n \n # 對反向解進行邊界處理\n rand_X = np.random.uniform(low=alpha, high=beta, size=[self.P, self.D]) # (6)\n mask = np.logical_or(obl_X>self.ub, obl_X<self.lb)\n obl_X[mask] = rand_X[mask].copy()\n \n # 取得新解\n concat_X = np.vstack([obl_X, self.X])\n F = self.fitness(concat_X)\n top_idx = F.argsort()[:self.P]\n top_F = F[top_idx].copy()\n top_X = concat_X[top_idx].copy()\n \n return top_X, top_F\n\n"
] | [
[
"numpy.diag",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.abs",
"numpy.clip",
"numpy.min",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.logical_or",
"numpy.random.randint",
"matplotlib.pyplot.grid",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qAp/kgl_deepfake | [
"d3ee36d704d82d5d72068ea16276a88b5746c8de",
"d3ee36d704d82d5d72068ea16276a88b5746c8de"
] | [
"face_detection/lightDSFD/data/widerface.py",
"face_detection/video_utils.py"
] | [
"from __future__ import division , print_function\n\"\"\"WIDER Face Dataset Classes\nauthor: swordli\n\"\"\"\n#from .config import HOME\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport cv2\nimport numpy as np\nsys.path.append(\"/f/home/jianli/code/s3fd.180716/\")\n#from utils.augmentations import SSDAugmentation\nimport scipy.io\nimport pdb\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nWIDERFace_CLASSES = ['face'] # always index 0\n# note: if you used our download scripts, this should be right\nWIDERFace_ROOT = \"/data/home/swordli/widerface_data/\"\n\n\nclass WIDERFaceAnnotationTransform(object):\n \"\"\"Transforms a WIDERFace annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n\n Arguments:\n class_to_ind (dict, optional): dictionary lookup of classnames -> indexes\n (default: alphabetic indexing of VOC's 20 classes)\n keep_difficult (bool, optional): keep difficult instances or not\n (default: False)\n height (int): height\n width (int): width\n \"\"\"\n\n def __init__(self, class_to_ind=None):\n self.class_to_ind = class_to_ind or dict(\n zip(WIDERFace_CLASSES, range(len(WIDERFace_CLASSES))))\n\n def __call__(self, target, width, height):\n \"\"\"\n Arguments:\n target (annotation) : the target annotation to be made usable\n will be an ET.Element\n Returns:\n a list containing lists of bounding boxes [bbox coords, class name]\n \"\"\"\n for i in range(len(target)):\n\n '''\n if target[i][0] < 2 : target[i][0] = 2\n if target[i][1] < 2 : target[i][1] = 2\n if target[i][2] > width-2 : target[i][2] = width - 2\n if target[i][3] > height-2 : target[i][3] = height - 2\n '''\n target[i][0] = float(target[i][0]) / width \n target[i][1] = float(target[i][1]) / height \n target[i][2] = float(target[i][2]) / width \n target[i][3] = float(target[i][3]) / height \n '''\n if target[i][0] < 0.0001:\n target[i][0] = 0.0001 \n if target[i][1] < 0.0001:\n target[i][1] = 0.0001 \n if target[i][2] > 0.9999:\n target[i][2] = 0.9999\n if target[i][3] > 0.9999:\n target[i][3] = 0.9999\n '''\n # filter error bbox\n \n #if target[i][0] >= target[i][2] or target[i][1] >= target[i][3] or target[i][0] < 0 or target[i][1] < 0 or target[i][2] > 1 or target[i][3] > 1 :\n # print (\"error bbox: \" , target[i])\n \n '''\n assert target[i][0] >= 0.001\n assert target[i][1] >= 0.001\n assert target[i][2] <= 0.999\n assert target[i][3] <= 0.999\n assert target[i][0] < target[i][2]\n assert target[i][1] < target[i][3]\n '''\n #res.append( [ target[i][0], target[i][1], target[i][2], target[i][3], target[i][4] ] )\n return target # [[xmin, ymin, xmax, ymax, label_ind], ... ]\n\nclass WIDERFaceDetection(data.Dataset):\n \"\"\"WIDERFace Detection Dataset Object \n http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/\n\n input is image, target is annotation\n\n Arguments:\n root (string): filepath to WIDERFace folder.\n image_set (string): imageset to use (eg. 
'train', 'val', 'test')\n transform (callable, optional): transformation to perform on the\n input image\n target_transform (callable, optional): transformation to perform on the\n target `annotation`\n (eg: take in caption string, return tensor of word indices)\n dataset_name (string, optional): which dataset to load\n (default: 'WIDERFace')\n \"\"\"\n\n def __init__(self, root,\n image_sets='train',\n transform=None, target_transform=WIDERFaceAnnotationTransform(),\n dataset_name='WIDER Face'):\n\n self.root = root\n self.image_set = image_sets\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n '''\n self._annopath = osp.join('%s', 'Annotations', '%s.xml')\n self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')\n '''\n self.img_ids = list()\n self.label_ids = list()\n self.event_ids = list()\n '''\n for (year, name) in image_sets:\n rootpath = osp.join(self.root, 'VOC' + year)\n for line in open(osp.join(rootpath, 'ImageSets', 'Main', name + '.txt')):\n self.ids.append((rootpath, line.strip()))\n '''\n if self.image_set == 'train':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_train/images' )\n fname = \"wider_face_train.mat\"\n\n if self.image_set == 'val':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_val/images' )\n fname = \"wider_face_val.mat\"\n\n if self.image_set == 'test':\n path_to_label = osp.join ( self.root , 'wider_face_split' ) \n path_to_image = osp.join ( self.root , 'WIDER_test/images' )\n fname = \"wider_face_test.mat\"\n\n self.path_to_label = path_to_label\n self.path_to_image = path_to_image\n self.fname = fname\n self.f = scipy.io.loadmat(osp.join(self.path_to_label, self.fname))\n self.event_list = self.f.get('event_list')\n self.file_list = self.f.get('file_list')\n self.face_bbx_list = self.f.get('face_bbx_list')\n \n self._load_widerface()\n\n def _load_widerface(self):\n\n error_bbox = 0 \n train_bbox = 0\n for event_idx, event in enumerate(self.event_list):\n directory = event[0][0]\n for im_idx, im in enumerate(self.file_list[event_idx][0]):\n im_name = im[0][0]\n\n if self.image_set in [ 'test' , 'val']:\n self.img_ids.append( osp.join(self.path_to_image, directory, im_name + '.jpg') )\n self.event_ids.append( directory )\n self.label_ids.append([])\n continue\n\n face_bbx = self.face_bbx_list[event_idx][0][im_idx][0]\n bboxes = []\n for i in range(face_bbx.shape[0]):\n # filter bbox\n if face_bbx[i][2] < 2 or face_bbx[i][3] < 2 or face_bbx[i][0] < 0 or face_bbx[i][1] < 0:\n error_bbox +=1\n #print (face_bbx[i])\n continue \n train_bbox += 1 \n xmin = float(face_bbx[i][0])\n ymin = float(face_bbx[i][1])\n xmax = float(face_bbx[i][2]) + xmin -1 \t\n ymax = float(face_bbx[i][3]) + ymin -1\n bboxes.append([xmin, ymin, xmax, ymax, 0])\n\n if ( len(bboxes)==0 ): # filter bbox will make bbox none\n continue\n self.img_ids.append( osp.join(self.path_to_image, directory, im_name + '.jpg') )\n self.event_ids.append( directory )\n self.label_ids.append( bboxes )\n #yield DATA(os.path.join(self.path_to_image, directory, im_name + '.jpg'), bboxes)\n print(\"Error bbox number to filter : %d, bbox number: %d\" %(error_bbox , train_bbox))\n \n\n def __getitem__(self, index):\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.img_ids)\n\n def pull_item(self, index):\n\n target = self.label_ids[index]\n img = cv2.imread(self.img_ids[index])\n\n height, 
width, channels = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n\n if self.transform is not None:\n target = np.array(target)\n # data augmentation\n img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])\n #self.vis_detections_v2(img , boxes , index)\n # to rgb\n #img = img[:, :, (2, 1, 0)]\n # img = img.transpose(2, 0, 1)\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n # return torch.from_numpy(img), target, height, width\n\n def vis_detections(self , im, dets, image_name ):\n\n cv2.imwrite(\"./tmp_res/\"+str(image_name)+\"ori.jpg\" , im)\n print (im)\n size = im.shape[0]\n dets = dets*size\n \"\"\"Draw detected bounding boxes.\"\"\"\n class_name = 'face'\n #im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n\n for i in range(len(dets)):\n bbox = dets[i, :4]\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0] + 1,\n bbox[3] - bbox[1] + 1, fill=False,\n edgecolor='red', linewidth=2.5)\n )\n plt.axis('off')\n plt.tight_layout()\n plt.savefig('./tmp_res/'+str(image_name)+\".jpg\", dpi=fig.dpi)\n\n def vis_detections_v2(self , im, dets, image_name ):\n size = im.shape[0]\n dets = dets*size\n \"\"\"Draw detected bounding boxes.\"\"\"\n class_name = 'face'\n for i in range(len(dets)):\n bbox = dets[i, :4]\n #print ((bbox[0],bbox[1]), (bbox[2],bbox[3]) )\n cv2.rectangle( im , (int(bbox[0]),int(bbox[1])), (int(bbox[2]),int(bbox[3])), (0,255,0),5 )\n cv2.imwrite('./tmp_res/'+str(image_name)+\".jpg\", im)\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n PIL img\n '''\n return cv2.imread(self.img_ids[index], cv2.IMREAD_COLOR)\n\n def pull_event(self, index):\n return self.event_ids[index]\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.img_ids[index]\n anno = self.label_ids[index]\n gt = self.target_transform(anno, 1, 1)\n return img_id.split(\"/\")[-1], gt\n\n def pull_tensor(self, index):\n '''Returns the original image at an index in tensor form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n tensorized version of img, squeezed\n '''\n return torch.Tensor(self.pull_image(index)).unsqueeze_(0)\n\n'''\nfrom utils.augmentations import SSDAugmentation\nif __name__ == '__main__': \n dataset = WIDERFaceDetection( root=WIDERFace_ROOT, transform=SSDAugmentation(640,(104,117,123) ) )\n for i in range(10000):\n img, tar = dataset.pull_item(i)\n print (sta_w)\n'''\n",
"import cv2\nimport torch\nimport random\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom pathlib import Path\nfrom tqdm.notebook import tqdm\n\n\ndef read_frame_as_size(video_path, size=(128, 128)):\n capture = cv2.VideoCapture(str(video_path))\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, size)\n capture.release()\n return frame\n\n\ndef read_frame(video_path):\n capture = cv2.VideoCapture(str(video_path))\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n capture.release()\n return frame\n\n\ndef read_all_frames(video_path):\n capture = cv2.VideoCapture(str(video_path))\n all_frames = []\n ret = True\n while True:\n ret, frame = capture.read()\n if ret:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n all_frames.append(frame)\n else:\n break\n\n capture.release()\n return np.array(all_frames)\n\n\ndef read_frames(video_path, start=0, end=16):\n capture = cv2.VideoCapture(str(video_path))\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n end = min(frame_count, end)\n\n capture.set(cv2.CAP_PROP_POS_FRAMES, start)\n\n frames = []\n for i in range(start, end):\n success, frame = capture.read()\n if not success:\n # If we couldn't read a frame, just continue\n continue\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frames.append(frame)\n\n capture.release()\n return np.array(frames)\n\n\ndef read_all_frames_as_square_crops(video_path):\n capture = cv2.VideoCapture(str(video_path))\n all_frames = []\n ret = True\n while True:\n ret, frame = capture.read()\n if ret:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n all_frames.append(frame)\n else:\n break\n\n capture.release()\n\n all_frames = np.array(all_frames)\n\n n_frames, height, width, channels = all_frames.shape\n\n # For vertical videos, just take the square crop\n if height > width:\n all_frames = all_frames[:, :width, :, :]\n\n return all_frames\n\n\ndef get_height_and_width_of_video(video_path):\n capture = cv2.VideoCapture(str(video_path))\n\n width = capture.get(cv2.CAP_PROP_FRAME_WIDTH) # float\n height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT) # float\n\n return height, width\n\n\ndef read_random_frames(video_path, num_frames=1, frame_count=None):\n \"\"\"\n Read {num_frames} random frames from any point in the video.\n \"\"\"\n frames = []\n\n for i in range(num_frames):\n frame = read_random_frame(video_path, frame_count)\n frames.append(frame)\n\n return np.array(frames)\n\n\ndef read_random_frame(video_path, frame_count=None):\n \"\"\"\n Read a random frame from any point in the video.\n \"\"\"\n capture = cv2.VideoCapture(str(video_path))\n\n if frame_count is None:\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n # HACK: Some videos are missing the last 10 frames. 
No idea why.\n random_frame = int(random.random() * frame_count) - 10\n # Set to read specific frame\n capture.set(cv2.CAP_PROP_POS_FRAMES, random_frame)\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n capture.release()\n return frame\n\n\ndef read_frame_at_frame_number(video_path, frame_number):\n capture = cv2.VideoCapture(str(video_path))\n # Set to read specific frame\n capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n capture.release()\n return frame\n\n\ndef read_random_sequential_frames(video_path, num_frames=4):\n \"\"\"\n Starting at a random point in the video, read {num_frames} frames and return\n as a single numpy array\n \"\"\"\n\n capture = cv2.VideoCapture(str(video_path))\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) - num_frames\n random_frame = int(random.random() * frame_count)\n capture.set(cv2.CAP_PROP_POS_FRAMES, random_frame)\n frames = []\n for i in range(num_frames):\n # Set to read specific frame\n ret, frame = capture.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frames.append(frame)\n\n capture.release()\n return np.array(frames)\n\n\ndef plot_detections(img, detections, with_keypoints=True, figsize=(10, 10)):\n fig, ax = plt.subplots(1, figsize=figsize)\n ax.grid(False)\n ax.imshow(img)\n\n if isinstance(detections, torch.Tensor):\n detections = detections.cpu().numpy()\n\n print(\"Found %d faces\" % len(detections))\n\n height, width, c = img.shape\n\n for i in range(len(detections)):\n xmin = max(0, detections[i, 0])\n ymin = max(0, detections[i, 1])\n xmax = min(width, detections[i, 2])\n ymax = min(height, detections[i, 3])\n\n rect = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,\n linewidth=1, edgecolor=\"r\", facecolor=\"none\")\n ax.add_patch(rect)\n\n plt.show()\n\n\ndef get_video_stats(video_path):\n cap = cv2.VideoCapture(video_path)\n\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n frame_rate = cap.get(cv2.CAP_PROP_FPS)\n frame_num = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n cap.release()\n\n return width, height, frame_rate, frame_num\n\n\ndef nms(dets, thresh):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep\n\n\ndef load_all_metadata():\n # Join metadata files into single dataframe\n metadata_list = []\n\n for i in tqdm(range(50)):\n folder = Path(\"../data/dfdc_train_part_\" + str(i))\n metadata_file_path = folder/'metadata.json'\n metadata = pd.read_json(metadata_file_path).T\n\n metadata.reset_index(inplace=True)\n metadata.rename({'index' : 'fname'}, axis=1, inplace=True)\n\n metadata['directory'] = str(folder)\n\n metadata_list.append(metadata)\n\n all_metadata = pd.concat(metadata_list)\n return all_metadata\n\n\ndef bb_intersection_over_union(boxA, boxB):\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB 
= min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n\n # compute the area of intersection rectangle\n interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))\n if interArea == 0:\n return 0\n # compute the area of both the prediction and ground-truth\n # rectangles\n boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))\n boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou\n\n"
] | [
[
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"matplotlib.pyplot.subplots",
"torch.from_numpy",
"matplotlib.pyplot.axis",
"numpy.array"
],
[
"pandas.concat",
"numpy.maximum",
"numpy.minimum",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"pandas.read_json",
"numpy.array",
"numpy.where",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ccjoechou/tvm | [
"779dc51e1332f417fa4c304b595ce76891dfc33a",
"779dc51e1332f417fa4c304b595ce76891dfc33a"
] | [
"python/tvm/meta_schedule/cost_model/cost_model.py",
"tests/python/contrib/test_ethosn/test_concatenate.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Meta Schedule CostModel.\"\"\"\nimport ctypes\nfrom typing import List\n\nimport numpy as np # type: ignore\nfrom tvm._ffi import register_object\nfrom tvm.runtime import Object\n\nfrom .. import _ffi_api\nfrom ..runner import RunnerResult\nfrom ..search_strategy import MeasureCandidate\nfrom ..tune_context import TuneContext\nfrom ..utils import _get_hex_address, check_override\n\n\n@register_object(\"meta_schedule.CostModel\")\nclass CostModel(Object):\n \"\"\"Cost model.\"\"\"\n\n def load(self, path: str) -> None:\n \"\"\"Load the cost model from given file location.\n\n Parameters\n ----------\n path : str\n The file path.\n \"\"\"\n _ffi_api.CostModelLoad(self, path) # type: ignore # pylint: disable=no-member\n\n def save(self, path: str) -> None:\n \"\"\"Save the cost model to given file location.\n\n Parameters\n ----------\n path : str\n The file path.\n \"\"\"\n _ffi_api.CostModelSave(self, path) # type: ignore # pylint: disable=no-member\n\n def update(\n self,\n context: TuneContext,\n candidates: List[MeasureCandidate],\n results: List[RunnerResult],\n ) -> None:\n \"\"\"Update the cost model given running results.\n\n Parameters\n ----------\n context : TuneContext,\n The tuning context.\n candidates : List[MeasureCandidate]\n The measure candidates.\n results : List[RunnerResult]\n The running results of the measure candidates.\n \"\"\"\n _ffi_api.CostModelUpdate(self, context, candidates, results) # type: ignore # pylint: disable=no-member\n\n def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:\n \"\"\"Update the cost model given running results.\n\n Parameters\n ----------\n context : TuneContext,\n The tuning context.\n candidates : List[MeasureCandidate]\n The measure candidates.\n\n Return\n ------\n result : np.ndarray\n The predicted normalized score.\n \"\"\"\n n = len(candidates)\n results = np.zeros(shape=(n,), dtype=\"float64\")\n _ffi_api.CostModelPredict( # type: ignore # pylint: disable=no-member\n self,\n context,\n candidates,\n results.ctypes.data_as(ctypes.c_void_p),\n )\n return results\n\n\n@register_object(\"meta_schedule.PyCostModel\")\nclass PyCostModel(CostModel):\n \"\"\"An abstract CostModel with customized methods on the python-side.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n\n @check_override(self.__class__, CostModel)\n def f_load(path: str) -> None:\n self.load(path)\n\n @check_override(self.__class__, CostModel)\n def f_save(path: str) -> None:\n self.save(path)\n\n @check_override(self.__class__, CostModel)\n def f_update(\n context: TuneContext,\n candidates: List[MeasureCandidate],\n results: List[RunnerResult],\n ) -> None:\n self.update(context, candidates, results)\n\n 
@check_override(self.__class__, CostModel)\n def f_predict(context: TuneContext, candidates: List[MeasureCandidate], return_ptr) -> None:\n n = len(candidates)\n return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_double))\n array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(n,))\n array_wrapper[:] = self.predict(context, candidates)\n assert (\n array_wrapper.dtype == \"float64\"\n ), \"ValueError: Invalid data type returned from CostModel Predict!\"\n\n def f_as_string() -> str:\n return str(self)\n\n self.__init_handle_by_constructor__(\n _ffi_api.CostModelPyCostModel, # type: ignore # pylint: disable=no-member\n f_load,\n f_save,\n f_update,\n f_predict,\n f_as_string,\n )\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({_get_hex_address(self.handle)})\"\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Concatenate tests for Arm(R) Ethos(TM)-N\"\"\"\n\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.testing import requires_ethosn\nfrom . import infrastructure as tei\n\n\ndef _get_inputs(shapes):\n inputs = {}\n for i, shape in enumerate(shapes):\n inputs[\"in\" + str(i)] = tvm.nd.array(\n np.random.randint(0, high=256, size=shape, dtype=\"uint8\")\n )\n\n return inputs\n\n\ndef _get_model(shapes, dtype, axis):\n tup = []\n for i, shape in enumerate(shapes):\n a = relay.var(\"in\" + str(i), shape=shape, dtype=dtype)\n tup.append(a)\n\n zeroi = relay.const(1, \"int32\")\n zerof = relay.const(0.5, \"float32\")\n con = relay.qnn.op.concatenate(\n tup,\n input_scales=[zerof] * len(shapes),\n input_zero_points=[zeroi] * len(shapes),\n output_scale=zerof,\n output_zero_point=zeroi,\n axis=axis,\n )\n return con\n\n\n@requires_ethosn\ndef test_concatenate():\n trials = [\n ([(1, 4), (1, 6)], 1),\n ([(1, 16, 4), (1, 16, 4)], 1),\n ([(1, 25, 4, 16)] * 3, 3),\n ([(1, 25, 4, 16), (1, 25, 5, 16), (1, 25, 6, 16)], 2),\n ]\n\n for shapes, axis in trials:\n outputs = []\n inputs = _get_inputs(shapes)\n for npu in [False, True]:\n model = _get_model(shapes, \"uint8\", axis)\n mod = tei.make_module(model, {})\n outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))\n\n tei.verify(outputs, 0)\n\n\n@requires_ethosn\ndef test_concatenate_failure():\n trials = [\n ([(1, 4, 4, 4, 4), (1, 4, 4, 4, 4)], \"uint8\", 1, \"dimensions=5, dimensions must be <= 4;\"),\n (\n [(1, 4, 4, 4), (1, 4, 4, 4)],\n \"uint8\",\n 3,\n \"Concatenation along the channels dimension (axis 3) requires input tensors with a multiple of 16 channels;\",\n ),\n (\n [(1, 4, 4, 4), (1, 4, 4, 4)],\n \"int8\",\n 2,\n \"dtype='int8', dtype must be either uint8 or int32; dtype='int8', dtype must be either uint8 or int32;\",\n ),\n (\n [(2, 4, 4, 4), (2, 4, 4, 4)],\n \"uint8\",\n 2,\n \"batch size=2, batch size must = 1; batch size=2, batch size must = 1;\",\n ),\n (\n [(1, 4, 4, 4), (1, 4, 4, 4)],\n \"uint8\",\n 0,\n \"Concatenation cannot be performed along batch axis (axis 0);\",\n ),\n ]\n\n for shapes, dtype, axis, err_msg in trials:\n model = _get_model(shapes, dtype, axis)\n mod = tei.make_ethosn_partition(model)\n tei.test_error(mod, {}, err_msg)\n"
] | [
[
"numpy.ctypeslib.as_array",
"numpy.zeros"
],
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JeroenDM/acrobotics | [
"d734ca25f40015e5c5ff019402a83504783c13cd"
] | [
"tests/test_link.py"
] | [
"from acrobotics.link import Link, LinkKinematics, DHLink, JointType\n\nimport numpy as np\nimport casadi as ca\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nfrom acrobotics.geometry import Scene\nfrom acrobotics.shapes import Box\nfrom numpy.testing import assert_almost_equal\n\n\ndef DenHarMat(theta, alpha, a, d):\n \"\"\" Use code from someone else to compare with:\n https://stackoverflow.com/questions/17891024/forward-kinematics-data-modeling\n \"\"\"\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n cos_alpha = np.cos(alpha)\n sin_alpha = np.sin(alpha)\n\n return np.array(\n [\n [cos_theta, -sin_theta * cos_alpha, sin_theta * sin_alpha, a * cos_theta],\n [sin_theta, cos_theta * cos_alpha, -cos_theta * sin_alpha, a * sin_theta],\n [0, sin_alpha, cos_alpha, d],\n [0, 0, 0, 1],\n ]\n )\n\n\nclass TestLinkKinematics:\n def test_init(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n assert link1.joint_type == JointType.revolute\n assert link2.joint_type == JointType.prismatic\n assert link1.dh == dh_params\n assert link2.dh == dh_params\n\n def test_dh_matrix(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n q1 = 1.2\n T1 = link1.get_link_relative_transform(q1)\n T1_desired = DenHarMat(q1, dh_params.alpha, dh_params.a, dh_params.d)\n assert_almost_equal(T1, T1_desired)\n\n d2 = 0.75\n T2 = link2.get_link_relative_transform(d2)\n T2_desired = DenHarMat(dh_params.theta, dh_params.alpha, dh_params.a, d2)\n assert_almost_equal(T2, T2_desired)\n\n def test_dh_matrix_casadi(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n link1 = LinkKinematics(dh_params, JointType.revolute)\n link2 = LinkKinematics(dh_params, JointType.prismatic)\n\n opti = ca.Opti()\n\n q1 = opti.variable()\n T1 = ca.Function(\"T1\", [q1], [link1.get_link_relative_transform_casadi(q1)])\n T1_desired = DenHarMat(1.2, dh_params.alpha, dh_params.a, dh_params.d)\n assert_almost_equal(np.array(T1(1.2)), T1_desired)\n\n d1 = opti.variable()\n T2 = ca.Function(\"T2\", [d1], [link2.get_link_relative_transform_casadi(d1)])\n T2_desired = DenHarMat(dh_params.theta, dh_params.alpha, dh_params.a, 0.75)\n assert_almost_equal(np.array(T2(0.75)), T2_desired)\n\n\nclass TestLink:\n def test_init(self):\n dh_params = DHLink(0.1, np.pi / 4, -0.1, np.pi / 6)\n geometry = Scene([Box(1, 2, 3)], [np.eye(4)])\n link1 = Link(dh_params, JointType.revolute, geometry)\n\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n link1.plot(ax, np.eye(4))\n"
] | [
[
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jeikabu/lumberyard | [
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115",
"07228c605ce16cbf5aaa209a94a3cb9d6c1a4115"
] | [
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/tests/cudadrv/test_deallocations.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/hsa/tests/hsadrv/test_hlc.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/tests/test_misc_util.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/groupby/test_nth.py",
"dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/sparse/sputils.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/indexes/timedeltas/test_timedelta.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/polynomial/tests/test_polynomial.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/hsa/tests/hsapy/test_matmul.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/dtypes/test_inference.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/series/test_rank.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/fft/tests/test_helper.py",
"dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/linalg/tests/test_basic.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/test_compat.py",
"dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/date_converters.py",
"dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/test_expressions.py",
"dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/optimize/nnls.py",
"dev/Tools/Python/2.7.13/mac/Python.framework/Versions/2.7/lib/python2.7/site-packages/scipy/io/matlab/tests/test_mio.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/tests/test_nrt.py",
"dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/computation/align.py",
"dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/quicksort.py",
"dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/tests/api/test_types.py"
] | [
"from __future__ import division\n\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom numba import cuda, config\nfrom numba.cuda.testing import unittest, skip_on_cudasim\nfrom numba.tests.support import captured_stderr\n\n\n@skip_on_cudasim('not supported on CUDASIM')\nclass TestDeallocation(unittest.TestCase):\n def test_max_pending_count(self):\n # get deallocation manager and flush it\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n # deallocate to maximum count\n for i in range(config.CUDA_DEALLOCS_COUNT):\n cuda.to_device(np.arange(1))\n self.assertEqual(len(deallocs), i + 1)\n # one more to trigger .clear()\n cuda.to_device(np.arange(1))\n self.assertEqual(len(deallocs), 0)\n\n def test_max_pending_bytes(self):\n # get deallocation manager and flush it\n ctx = cuda.current_context()\n deallocs = ctx.deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n mi = ctx.get_memory_info()\n\n max_pending = 10**6 # 1MB\n old_ratio = config.CUDA_DEALLOCS_RATIO\n try:\n # change to a smaller ratio\n config.CUDA_DEALLOCS_RATIO = max_pending / mi.total\n self.assertEqual(deallocs._max_pending_bytes, max_pending)\n\n # deallocate half the max size\n cuda.to_device(np.ones(max_pending // 2, dtype=np.int8))\n self.assertEqual(len(deallocs), 1)\n\n # deallocate another remaining\n cuda.to_device(np.ones(max_pending - deallocs._size, dtype=np.int8))\n self.assertEqual(len(deallocs), 2)\n\n # another byte to trigger .clear()\n cuda.to_device(np.ones(1, dtype=np.int8))\n self.assertEqual(len(deallocs), 0)\n finally:\n # restore old ratio\n config.CUDA_DEALLOCS_RATIO = old_ratio\n\n\n@skip_on_cudasim(\"defer_cleanup has no effect in CUDASIM\")\nclass TestDeferCleanup(unittest.TestCase):\n def test_basic(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr1\n self.assertEqual(len(deallocs), 1)\n del darr2\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n def test_nested(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n with cuda.defer_cleanup():\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr1\n self.assertEqual(len(deallocs), 1)\n del darr2\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n deallocs.clear()\n self.assertEqual(len(deallocs), 2)\n\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n def test_exception(self):\n harr = np.arange(5)\n darr1 = cuda.to_device(harr)\n deallocs = cuda.current_context().deallocations\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n class CustomError(Exception):\n pass\n\n with self.assertRaises(CustomError):\n with cuda.defer_cleanup():\n darr2 = cuda.to_device(harr)\n del darr2\n self.assertEqual(len(deallocs), 1)\n deallocs.clear()\n self.assertEqual(len(deallocs), 1)\n raise CustomError\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n del darr1\n self.assertEqual(len(deallocs), 1)\n deallocs.clear()\n self.assertEqual(len(deallocs), 0)\n\n\nclass TestDeferCleanupAvail(unittest.TestCase):\n def test_context_manager(self):\n # just make sure the API is available\n with 
cuda.defer_cleanup():\n pass\n\n\n@skip_on_cudasim('not supported on CUDASIM')\nclass TestDel(unittest.TestCase):\n \"\"\"\n Ensure resources are deleted properly without ignored exception.\n \"\"\"\n @contextmanager\n def check_ignored_exception(self, ctx):\n with captured_stderr() as cap:\n yield\n ctx.deallocations.clear()\n self.assertFalse(cap.getvalue())\n\n def test_stream(self):\n ctx = cuda.current_context()\n stream = ctx.create_stream()\n with self.check_ignored_exception(ctx):\n del stream\n\n def test_event(self):\n ctx = cuda.current_context()\n event = ctx.create_event()\n with self.check_ignored_exception(ctx):\n del event\n\n def test_pinned_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memhostalloc(32)\n with self.check_ignored_exception(ctx):\n del mem\n\n def test_mapped_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memhostalloc(32, mapped=True)\n with self.check_ignored_exception(ctx):\n del mem\n\n def test_device_memory(self):\n ctx = cuda.current_context()\n mem = ctx.memalloc(32)\n with self.check_ignored_exception(ctx):\n del mem\n\n\nif __name__ == '__main__':\n unittest.main()",
"from __future__ import print_function, absolute_import\n\nimport numba.unittest_support as unittest\nfrom numba.hsa.hlc import hlc\n\nSPIR_SAMPLE = \"\"\"\n; ModuleID = 'kernel.out.bc'\ntarget datalayout = \"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-n32\"\ntarget triple = \"hsail64-pc-unknown-amdopencl\"\n\ndefine spir_kernel void @copy(float addrspace(1)* nocapture %input,\nfloat addrspace(1)* nocapture %output) {\n %1 = load float addrspace(1)* %input, align 4, !tbaa !8\n store float %1, float addrspace(1)* %output, align 4, !tbaa !8\n ret void\n}\n\n!opencl.kernels = !{!0}\n!opencl.enable.FP_CONTRACT = !{}\n!opencl.spir.version = !{!6}\n!opencl.ocl.version = !{!6}\n!opencl.used.extensions = !{!7}\n!opencl.used.optional.core.features = !{!7}\n!opencl.compiler.options = !{!7}\n!0 = metadata !{void (float addrspace(1)*, float addrspace(1)*)* @copy, metadata !1, metadata !2, metadata !3, metadata !4, metadata !5}\n!1 = metadata !{metadata !\"kernel_arg_addr_space\", i32 1, i32 1}\n!2 = metadata !{metadata !\"kernel_arg_access_qual\", metadata !\"none\", metadata !\"none\"}\n!3 = metadata !{metadata !\"kernel_arg_type\", metadata !\"float*\", metadata !\"float*\"}\n!4 = metadata !{metadata !\"kernel_arg_type_qual\", metadata !\"\", metadata !\"\"}\n!5 = metadata !{metadata !\"kernel_arg_base_type\", metadata !\"float*\", metadata !\"float*\"}\n!6 = metadata !{i32 1, i32 2}\n!7 = metadata !{}\n!8 = metadata !{metadata !\"float\", metadata !9}\n!9 = metadata !{metadata !\"omnipotent char\", metadata !10}\n!10 = metadata !{metadata !\"Simple C/C++ TBAA\"}\n\"\"\"\n\n\nclass TestHLC(unittest.TestCase):\n def test_hsail(self):\n hlcmod = hlc.Module()\n hlcmod.load_llvm(SPIR_SAMPLE)\n hsail = hlcmod.finalize().hsail\n self.assertIn(\"prog kernel ©\", hsail)\n\n def test_brig(self):\n # Genreate BRIG\n hlcmod = hlc.Module()\n hlcmod.load_llvm(SPIR_SAMPLE)\n brig = hlcmod.finalize().brig\n # Check the first 8 bytes for the magic string\n self.assertEqual(brig[:8].decode('latin1'), 'HSA BRIG')\n\n # Compile\n from numba.hsa.hsadrv.driver import BrigModule, Program, hsa, Executable\n\n agent = hsa.components[0]\n brigmod = BrigModule(brig)\n prog = Program()\n prog.add_module(brigmod)\n code = prog.finalize(agent.isa)\n ex = Executable()\n ex.load(agent, code)\n ex.freeze()\n sym = ex.get_symbol(agent, \"©\")\n self.assertNotEqual(sym.kernel_object, 0)\n self.assertGreater(sym.kernarg_segment_size, 0)\n\n # Execute\n import ctypes\n import numpy as np\n\n sig = hsa.create_signal(1)\n\n kernarg_region = [r for r in agent.regions if r.supports_kernargs][0]\n\n kernarg_types = (ctypes.c_void_p * 2)\n kernargs = kernarg_region.allocate(kernarg_types)\n\n src = np.random.random(1).astype(np.float32)\n dst = np.zeros_like(src)\n\n kernargs[0] = src.ctypes.data\n kernargs[1] = dst.ctypes.data\n\n hsa.hsa_memory_register(src.ctypes.data, src.nbytes)\n hsa.hsa_memory_register(dst.ctypes.data, dst.nbytes)\n hsa.hsa_memory_register(ctypes.byref(kernargs),\n ctypes.sizeof(kernargs))\n\n queue = agent.create_queue_single(32)\n queue.dispatch(sym, kernargs, workgroup_size=(1,),\n grid_size=(1,))\n\n np.testing.assert_equal(dst, src)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n",
"from __future__ import division, absolute_import, print_function\n\nfrom os.path import join, sep, dirname\n\nfrom numpy.distutils.misc_util import (\n appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info\n)\nfrom numpy.testing import (\n TestCase, run_module_suite, assert_, assert_equal\n)\n\najoin = lambda *paths: join(*((sep,)+paths))\n\nclass TestAppendpath(TestCase):\n\n def test_1(self):\n assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))\n assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))\n assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))\n assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))\n\n def test_2(self):\n assert_equal(appendpath('prefix/sub', 'name'),\n join('prefix', 'sub', 'name'))\n assert_equal(appendpath('prefix/sub', 'sup/name'),\n join('prefix', 'sub', 'sup', 'name'))\n assert_equal(appendpath('/prefix/sub', '/prefix/name'),\n ajoin('prefix', 'sub', 'name'))\n\n def test_3(self):\n assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),\n ajoin('prefix', 'sub', 'sup', 'name'))\n assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),\n ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))\n assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),\n ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))\n\nclass TestMinrelpath(TestCase):\n\n def test_1(self):\n n = lambda path: path.replace('/', sep)\n assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))\n assert_equal(minrelpath('..'), '..')\n assert_equal(minrelpath(n('aa/..')), '')\n assert_equal(minrelpath(n('aa/../bb')), 'bb')\n assert_equal(minrelpath(n('aa/bb/..')), 'aa')\n assert_equal(minrelpath(n('aa/bb/../..')), '')\n assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))\n assert_equal(minrelpath(n('.././..')), n('../..'))\n assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))\n\nclass TestGpaths(TestCase):\n\n def test_gpaths(self):\n local_path = minrelpath(join(dirname(__file__), '..'))\n ls = gpaths('command/*.py', local_path)\n assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))\n f = gpaths('system_info.py', local_path)\n assert_(join(local_path, 'system_info.py') == f[0], repr(f))\n\nclass TestSharedExtension(TestCase):\n\n def test_get_shared_lib_extension(self):\n import sys\n ext = get_shared_lib_extension(is_python_ext=False)\n if sys.platform.startswith('linux'):\n assert_equal(ext, '.so')\n elif sys.platform.startswith('gnukfreebsd'):\n assert_equal(ext, '.so')\n elif sys.platform.startswith('darwin'):\n assert_equal(ext, '.dylib')\n elif sys.platform.startswith('win'):\n assert_equal(ext, '.dll')\n # just check for no crash\n assert_(get_shared_lib_extension(is_python_ext=True))\n\n\ndef test_installed_npymath_ini():\n # Regression test for gh-7707. If npymath.ini wasn't installed, then this\n # will give an error.\n info = get_info('npymath')\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"import numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, MultiIndex, Index, Series, isnull\nfrom pandas.compat import lrange\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\n\nfrom .common import MixIn\n\n\nclass TestNth(MixIn):\n\n def test_first_last_nth(self):\n # tests for first / last / nth\n grouped = self.df.groupby('A')\n first = grouped.first()\n expected = self.df.loc[[1, 0], ['B', 'C', 'D']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(first, expected)\n\n nth = grouped.nth(0)\n assert_frame_equal(nth, expected)\n\n last = grouped.last()\n expected = self.df.loc[[5, 7], ['B', 'C', 'D']]\n expected.index = Index(['bar', 'foo'], name='A')\n assert_frame_equal(last, expected)\n\n nth = grouped.nth(-1)\n assert_frame_equal(nth, expected)\n\n nth = grouped.nth(1)\n expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()\n expected.index = Index(['foo', 'bar'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(nth, expected)\n\n # it works!\n grouped['B'].first()\n grouped['B'].last()\n grouped['B'].nth(0)\n\n self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan\n assert isnull(grouped['B'].first()['foo'])\n assert isnull(grouped['B'].last()['foo'])\n assert isnull(grouped['B'].nth(0)['foo'])\n\n # v0.14.0 whatsnew\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n g = df.groupby('A')\n result = g.first()\n expected = df.iloc[[1, 2]].set_index('A')\n assert_frame_equal(result, expected)\n\n expected = df.iloc[[1, 2]].set_index('A')\n result = g.nth(0, dropna='any')\n assert_frame_equal(result, expected)\n\n def test_first_last_nth_dtypes(self):\n\n df = self.df_mixed_floats.copy()\n df['E'] = True\n df['F'] = 1\n\n # tests for first / last / nth\n grouped = df.groupby('A')\n first = grouped.first()\n expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(first, expected)\n\n last = grouped.last()\n expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(last, expected)\n\n nth = grouped.nth(1)\n expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]\n expected.index = Index(['bar', 'foo'], name='A')\n expected = expected.sort_index()\n assert_frame_equal(nth, expected)\n\n # GH 2763, first/last shifting dtypes\n idx = lrange(10)\n idx.append(9)\n s = Series(data=lrange(11), index=idx, name='IntCol')\n assert s.dtype == 'int64'\n f = s.groupby(level=0).first()\n assert f.dtype == 'int64'\n\n def test_nth(self):\n df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n g = df.groupby('A')\n\n assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))\n assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))\n assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))\n assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))\n assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))\n assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))\n assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])\n assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])\n assert_frame_equal(g[['B']].nth(0),\n df.loc[[0, 2], ['A', 'B']].set_index('A'))\n\n exp = df.set_index('A')\n assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])\n assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])\n\n exp['B'] = np.nan\n 
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])\n        assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])\n\n        # out of bounds, regression from 0.13.1\n        # GH 6621\n        df = DataFrame({'color': {0: 'green',\n                                  1: 'green',\n                                  2: 'red',\n                                  3: 'red',\n                                  4: 'red'},\n                        'food': {0: 'ham',\n                                 1: 'eggs',\n                                 2: 'eggs',\n                                 3: 'ham',\n                                 4: 'pork'},\n                        'two': {0: 1.5456590000000001,\n                                1: -0.070345000000000005,\n                                2: -2.4004539999999999,\n                                3: 0.46206000000000003,\n                                4: 0.52350799999999997},\n                        'one': {0: 0.56573799999999996,\n                                1: -0.9742360000000001,\n                                2: 1.033801,\n                                3: -0.78543499999999999,\n                                4: 0.70422799999999997}}).set_index(['color',\n                                                                     'food'])\n\n        result = df.groupby(level=0, as_index=False).nth(2)\n        expected = df.iloc[[-1]]\n        assert_frame_equal(result, expected)\n\n        result = df.groupby(level=0, as_index=False).nth(3)\n        expected = df.loc[[]]\n        assert_frame_equal(result, expected)\n\n        # GH 7559\n        # from the vbench\n        df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')\n        s = df[1]\n        g = df[0]\n        expected = s.groupby(g).first()\n        expected2 = s.groupby(g).apply(lambda x: x.iloc[0])\n        assert_series_equal(expected2, expected, check_names=False)\n        # the grouped series keeps the name of the column it came from\n        assert expected.name == 1\n\n        # validate first\n        v = s[g == 1].iloc[0]\n        assert expected.iloc[0] == v\n        assert expected2.iloc[0] == v\n\n        # this is NOT the same as .first (as sorted is default!)\n        # as it keeps the order in the series (and not the group order)\n        # related GH 7287\n        expected = s.groupby(g, sort=False).first()\n        result = s.groupby(g, sort=False).nth(0, dropna='all')\n        assert_series_equal(result, expected)\n\n        # doc example\n        df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\n        g = df.groupby('A')\n        result = g.B.nth(0, dropna=True)\n        expected = g.B.first()\n        assert_series_equal(result, expected)\n\n        # test multiple nth values\n        df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],\n                       columns=['A', 'B'])\n        g = df.groupby('A')\n\n        assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))\n        assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))\n        assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))\n        assert_frame_equal(\n            g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))\n        assert_frame_equal(\n            g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))\n        assert_frame_equal(\n            g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))\n        assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))\n        assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))\n\n        business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',\n                                       freq='B')\n        df = DataFrame(1, index=business_dates, columns=['a', 'b'])\n        # get the first, fourth and last two business days for each month\n        key = (df.index.year, df.index.month)\n        result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])\n        expected_dates = pd.to_datetime(\n            ['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',\n             '2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',\n             '2014/6/27', '2014/6/30'])\n        expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)\n        assert_frame_equal(result, expected)\n\n    def test_nth_multi_index(self):\n        # PR 9090, related to issue 8979\n        # test nth on MultiIndex, should match .first()\n        grouped = self.three_group.groupby(['A', 'B'])\n        result = grouped.nth(0)\n        expected = grouped.first()\n        assert_frame_equal(result, expected)\n\n    def test_nth_multi_index_as_expected(self):\n        # PR 9090, related to issue 8979\n        # test nth on MultiIndex\n        three_group = DataFrame(\n            
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',\n 'dull', 'shiny', 'shiny', 'shiny']})\n grouped = three_group.groupby(['A', 'B'])\n result = grouped.nth(0)\n expected = DataFrame(\n {'C': ['dull', 'dull', 'dull', 'dull']},\n index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],\n ['one', 'two', 'one', 'two']],\n names=['A', 'B']))\n assert_frame_equal(result, expected)\n\n\ndef test_nth_empty():\n # GH 16064\n df = DataFrame(index=[0], columns=['a', 'b', 'c'])\n result = df.groupby('a').nth(10)\n expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])\n assert_frame_equal(result, expected)\n\n result = df.groupby(['a', 'b']).nth(10)\n expected = DataFrame(index=MultiIndex([[], []], [[], []],\n names=['a', 'b']),\n columns=['c'])\n assert_frame_equal(result, expected)\n",
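"# Editor's note: hedged standalone illustration of the groupby .nth()\n# semantics tested above -- nth(0) is positional and keeps NaN rows, while\n# nth(0, dropna='any') matches first(); written against the pandas API used\n# in this file.\nimport numpy as np\nimport pandas as pd\n\ndf = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])\ng = df.groupby('A')\nprint(g.nth(0))                 # rows 0 and 2: the NaN row is kept for A=1\nprint(g.nth(0, dropna='any'))   # rows 1 and 2: first non-null row per group\nprint(g.first())                # same values as nth(0, dropna='any')\n",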
"\"\"\" Utility functions for sparse matrix module\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport numpy as np\n\n__all__ = ['upcast', 'getdtype', 'isscalarlike', 'isintlike',\n 'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']\n\nsupported_dtypes = ['bool', 'int8', 'uint8', 'short', 'ushort', 'intc',\n 'uintc', 'longlong', 'ulonglong', 'single', 'double',\n 'longdouble', 'csingle', 'cdouble', 'clongdouble']\nsupported_dtypes = [np.typeDict[x] for x in supported_dtypes]\n\n_upcast_memo = {}\n\n\ndef upcast(*args):\n \"\"\"Returns the nearest supported sparse dtype for the\n combination of one or more types.\n\n upcast(t0, t1, ..., tn) -> T where T is a supported dtype\n\n Examples\n --------\n\n >>> upcast('int32')\n <type 'numpy.int32'>\n >>> upcast('bool')\n <type 'numpy.bool_'>\n >>> upcast('int32','float32')\n <type 'numpy.float64'>\n >>> upcast('bool',complex,float)\n <type 'numpy.complex128'>\n\n \"\"\"\n\n t = _upcast_memo.get(hash(args))\n if t is not None:\n return t\n\n upcast = np.find_common_type(args, [])\n\n for t in supported_dtypes:\n if np.can_cast(upcast, t):\n _upcast_memo[hash(args)] = t\n return t\n\n raise TypeError('no supported conversion for types: %r' % (args,))\n\n\ndef upcast_char(*args):\n \"\"\"Same as `upcast` but taking dtype.char as input (faster).\"\"\"\n t = _upcast_memo.get(args)\n if t is not None:\n return t\n t = upcast(*map(np.dtype, args))\n _upcast_memo[args] = t\n return t\n\n\ndef upcast_scalar(dtype, scalar):\n \"\"\"Determine data type for binary operation between an array of\n type `dtype` and a scalar.\n \"\"\"\n return (np.array([0], dtype=dtype) * scalar).dtype\n\n\ndef downcast_intp_index(arr):\n \"\"\"\n Down-cast index array to np.intp dtype if it is of a larger dtype.\n\n Raise an error if the array contains a value that is too large for\n intp.\n \"\"\"\n if arr.dtype.itemsize > np.dtype(np.intp).itemsize:\n if arr.size == 0:\n return arr.astype(np.intp)\n maxval = arr.max()\n minval = arr.min()\n if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:\n raise ValueError(\"Cannot deal with arrays with indices larger \"\n \"than the machine maximum address size \"\n \"(e.g. 64-bit indices on 32-bit machine).\")\n return arr.astype(np.intp)\n return arr\n\n\ndef to_native(A):\n return np.asarray(A, dtype=A.dtype.newbyteorder('native'))\n\n\ndef getdtype(dtype, a=None, default=None):\n \"\"\"Function used to simplify argument processing. If 'dtype' is not\n specified (is None), returns a.dtype; otherwise returns a np.dtype\n object created from the specified dtype argument. 
If 'dtype' and 'a'\n    are both None, construct a data type out of the 'default' parameter.\n    \"\"\"\n    # TODO is this really what we want?\n    if dtype is None:\n        try:\n            newdtype = a.dtype\n        except AttributeError:\n            if default is not None:\n                newdtype = np.dtype(default)\n            else:\n                raise TypeError(\"could not interpret data type\")\n    else:\n        newdtype = np.dtype(dtype)\n        if newdtype == np.object_:\n            warnings.warn(\"object dtype is not supported by sparse matrices\")\n\n    return newdtype\n\n\ndef get_index_dtype(arrays=(), maxval=None, check_contents=False):\n    \"\"\"\n    Based on the input (integer) arrays, determine a suitable index data\n    type that can hold the data in the arrays.\n\n    Parameters\n    ----------\n    arrays : tuple of array_like\n        Input arrays whose types/contents to check\n    maxval : float, optional\n        Maximum value needed\n    check_contents : bool, optional\n        Whether to check the values in the arrays and not just their types.\n        Default: False (check only the types)\n\n    Returns\n    -------\n    dtype : dtype\n        Suitable index data type (int32 or int64)\n\n    \"\"\"\n\n    int32max = np.iinfo(np.int32).max\n\n    dtype = np.intc\n    if maxval is not None:\n        if maxval > int32max:\n            dtype = np.int64\n\n    if isinstance(arrays, np.ndarray):\n        arrays = (arrays,)\n\n    for arr in arrays:\n        arr = np.asarray(arr)\n        if arr.dtype > np.int32:\n            if check_contents:\n                if arr.size == 0:\n                    # a bigger type not needed\n                    continue\n                elif np.issubdtype(arr.dtype, np.integer):\n                    maxval = arr.max()\n                    minval = arr.min()\n                    if (minval >= np.iinfo(np.int32).min and\n                            maxval <= np.iinfo(np.int32).max):\n                        # a bigger type not needed\n                        continue\n\n            dtype = np.int64\n            break\n\n    return dtype\n\n\ndef get_sum_dtype(dtype):\n    \"\"\"Mimic numpy's casting for np.sum\"\"\"\n    if np.issubdtype(dtype, np.float_):\n        return np.float_\n    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):\n        return np.uint\n    if np.can_cast(dtype, np.int_):\n        return np.int_\n    return dtype\n\n\ndef isscalarlike(x):\n    \"\"\"Is x either a scalar, an array scalar, or a 0-dim array?\"\"\"\n    return np.isscalar(x) or (isdense(x) and x.ndim == 0)\n\n\ndef isintlike(x):\n    \"\"\"Is x appropriate as an index into a sparse matrix? Returns True\n    if it can be cast safely to a machine int.\n    \"\"\"\n    if not isscalarlike(x):\n        return False\n    try:\n        return bool(int(x) == x)\n    except (TypeError, ValueError):\n        return False\n\n\ndef isshape(x):\n    \"\"\"Is x a valid 2-tuple of dimensions?\n    \"\"\"\n    try:\n        # Assume it's a tuple of matrix dimensions (M, N)\n        (M, N) = x\n    except Exception:\n        return False\n    else:\n        if isintlike(M) and isintlike(N):\n            if np.ndim(M) == 0 and np.ndim(N) == 0:\n                return True\n        return False\n\n\ndef issequence(t):\n    return ((isinstance(t, (list, tuple)) and\n             (len(t) == 0 or np.isscalar(t[0]))) or\n            (isinstance(t, np.ndarray) and (t.ndim == 1)))\n\n\ndef ismatrix(t):\n    return ((isinstance(t, (list, tuple)) and\n             len(t) > 0 and issequence(t[0])) or\n            (isinstance(t, np.ndarray) and t.ndim == 2))\n\n\ndef isdense(x):\n    return isinstance(x, np.ndarray)\n\n\ndef validateaxis(axis):\n    if axis is not None:\n        axis_type = type(axis)\n\n        # In NumPy, you can pass in tuples for 'axis', but they are\n        # not very useful for sparse matrices given their limited\n        # dimensions, so let's make it explicit that they are not\n        # allowed to be passed in\n        if axis_type == tuple:\n            raise TypeError((\"Tuples are not accepted for the 'axis' \"\n                             \"parameter. 
Please pass in one of the \"\n \"following: {-2, -1, 0, 1, None}.\"))\n\n # If not a tuple, check that the provided axis is actually\n # an integer and raise a TypeError similar to NumPy's\n if not np.issubdtype(np.dtype(axis_type), np.integer):\n raise TypeError(\"axis must be an integer, not {name}\"\n .format(name=axis_type.__name__))\n\n if not (-2 <= axis <= 1):\n raise ValueError(\"axis out of range\")\n\n\nclass IndexMixin(object):\n \"\"\"\n This class simply exists to hold the methods necessary for fancy indexing.\n \"\"\"\n def _slicetoarange(self, j, shape):\n \"\"\" Given a slice object, use numpy arange to change it to a 1D\n array.\n \"\"\"\n start, stop, step = j.indices(shape)\n return np.arange(start, stop, step)\n\n def _unpack_index(self, index):\n \"\"\" Parse index. Always return a tuple of the form (row, col).\n Where row/col is a integer, slice, or array of integers.\n \"\"\"\n # First, check if indexing with single boolean matrix.\n from .base import spmatrix # This feels dirty but...\n if (isinstance(index, (spmatrix, np.ndarray)) and\n (index.ndim == 2) and index.dtype.kind == 'b'):\n return index.nonzero()\n\n # Parse any ellipses.\n index = self._check_ellipsis(index)\n\n # Next, parse the tuple or object\n if isinstance(index, tuple):\n if len(index) == 2:\n row, col = index\n elif len(index) == 1:\n row, col = index[0], slice(None)\n else:\n raise IndexError('invalid number of indices')\n else:\n row, col = index, slice(None)\n\n # Next, check for validity, or transform the index as needed.\n row, col = self._check_boolean(row, col)\n return row, col\n\n def _check_ellipsis(self, index):\n \"\"\"Process indices with Ellipsis. Returns modified index.\"\"\"\n if index is Ellipsis:\n return (slice(None), slice(None))\n elif isinstance(index, tuple):\n # Find first ellipsis\n for j, v in enumerate(index):\n if v is Ellipsis:\n first_ellipsis = j\n break\n else:\n first_ellipsis = None\n\n # Expand the first one\n if first_ellipsis is not None:\n # Shortcuts\n if len(index) == 1:\n return (slice(None), slice(None))\n elif len(index) == 2:\n if first_ellipsis == 0:\n if index[1] is Ellipsis:\n return (slice(None), slice(None))\n else:\n return (slice(None), index[1])\n else:\n return (index[0], slice(None))\n\n # General case\n tail = ()\n for v in index[first_ellipsis+1:]:\n if v is not Ellipsis:\n tail = tail + (v,)\n nd = first_ellipsis + len(tail)\n nslice = max(0, 2 - nd)\n return index[:first_ellipsis] + (slice(None),)*nslice + tail\n\n return index\n\n def _check_boolean(self, row, col):\n from .base import isspmatrix # ew...\n # Supporting sparse boolean indexing with both row and col does\n # not work because spmatrix.ndim is always 2.\n if isspmatrix(row) or isspmatrix(col):\n raise IndexError(\n \"Indexing with sparse matrices is not supported \"\n \"except boolean indexing where matrix and index \"\n \"are equal shapes.\")\n if isinstance(row, np.ndarray) and row.dtype.kind == 'b':\n row = self._boolean_index_to_array(row)\n if isinstance(col, np.ndarray) and col.dtype.kind == 'b':\n col = self._boolean_index_to_array(col)\n return row, col\n\n def _boolean_index_to_array(self, i):\n if i.ndim > 1:\n raise IndexError('invalid index shape')\n return i.nonzero()[0]\n\n def _index_to_arrays(self, i, j):\n i, j = self._check_boolean(i, j)\n\n i_slice = isinstance(i, slice)\n if i_slice:\n i = self._slicetoarange(i, self.shape[0])[:, None]\n else:\n i = np.atleast_1d(i)\n\n if isinstance(j, slice):\n j = self._slicetoarange(j, self.shape[1])[None, :]\n if 
i.ndim == 1:\n                i = i[:, None]\n            elif not i_slice:\n                raise IndexError('index returns 3-dim structure')\n        elif isscalarlike(j):\n            # row vector special case\n            j = np.atleast_1d(j)\n            if i.ndim == 1:\n                i, j = np.broadcast_arrays(i, j)\n                i = i[:, None]\n                j = j[:, None]\n                return i, j\n        else:\n            j = np.atleast_1d(j)\n            if i_slice and j.ndim > 1:\n                raise IndexError('index returns 3-dim structure')\n\n        i, j = np.broadcast_arrays(i, j)\n\n        if i.ndim == 1:\n            # promote 1-D fancy indexes to a 2-D (single-row) result\n            i = i[None, :]\n            j = j[None, :]\n        elif i.ndim > 2:\n            raise IndexError(\"Index dimension must be <= 2\")\n\n        return i, j\n",
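"# Editor's note: small self-contained sketch of the index-broadcasting idea\n# behind IndexMixin._index_to_arrays above: a column of row indices and a row\n# of column indices broadcast to a pair of equal-shape 2-D arrays that\n# enumerate the selected sub-block.\nimport numpy as np\n\nrow = np.array([0, 2])[:, None]   # column vector of row indices\ncol = np.array([1, 3])[None, :]   # row vector of column indices\ni, j = np.broadcast_arrays(row, col)\nassert i.shape == j.shape == (2, 2)\n# (i[k, l], j[k, l]) walks the 2x2 block at rows {0, 2} x columns {1, 3}\n",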
"import pytest\n\nimport numpy as np\nfrom datetime import timedelta\n\nimport pandas as pd\nimport pandas.util.testing as tm\nfrom pandas import (timedelta_range, date_range, Series, Timedelta,\n DatetimeIndex, TimedeltaIndex, Index, DataFrame,\n Int64Index, _np_version_under1p8)\nfrom pandas.util.testing import (assert_almost_equal, assert_series_equal,\n assert_index_equal)\n\nfrom ..datetimelike import DatetimeLike\n\nrandn = np.random.randn\n\n\nclass TestTimedeltaIndex(DatetimeLike):\n _holder = TimedeltaIndex\n _multiprocess_can_split_ = True\n\n def setup_method(self, method):\n self.indices = dict(index=tm.makeTimedeltaIndex(10))\n self.setup_indices()\n\n def create_index(self):\n return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n\n def test_shift(self):\n # test shift for TimedeltaIndex\n # err8083\n\n drange = self.create_index()\n result = drange.shift(1)\n expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',\n '3 days 01:00:00',\n '4 days 01:00:00', '5 days 01:00:00'],\n freq='D')\n tm.assert_index_equal(result, expected)\n\n result = drange.shift(3, freq='2D 1s')\n expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',\n '8 days 01:00:03', '9 days 01:00:03',\n '10 days 01:00:03'], freq='D')\n tm.assert_index_equal(result, expected)\n\n def test_get_loc(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n\n for method in [None, 'pad', 'backfill', 'nearest']:\n assert idx.get_loc(idx[1], method) == 1\n assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1\n assert idx.get_loc(str(idx[1]), method) == 1\n\n assert idx.get_loc(idx[1], 'pad',\n tolerance=pd.Timedelta(0)) == 1\n assert idx.get_loc(idx[1], 'pad',\n tolerance=np.timedelta64(0, 's')) == 1\n assert idx.get_loc(idx[1], 'pad',\n tolerance=timedelta(0)) == 1\n\n with tm.assert_raises_regex(ValueError, 'must be convertible'):\n idx.get_loc(idx[1], method='nearest', tolerance='foo')\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n assert idx.get_loc('1 day 1 hour', method) == loc\n\n def test_get_loc_nat(self):\n tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])\n\n assert tidx.get_loc(pd.NaT) == 1\n assert tidx.get_loc(None) == 1\n assert tidx.get_loc(float('nan')) == 1\n assert tidx.get_loc(np.nan) == 1\n\n def test_get_indexer(self):\n idx = pd.to_timedelta(['0 days', '1 days', '2 days'])\n tm.assert_numpy_array_equal(idx.get_indexer(idx),\n np.array([0, 1, 2], dtype=np.intp))\n\n target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),\n np.array([-1, 0, 1], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),\n np.array([0, 1, 2], dtype=np.intp))\n tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),\n np.array([0, 1, 1], dtype=np.intp))\n\n res = idx.get_indexer(target, 'nearest',\n tolerance=pd.Timedelta('1 hour'))\n tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))\n\n def test_numeric_compat(self):\n\n idx = self._holder(np.arange(5, dtype='int64'))\n didx = self._holder(np.arange(5, dtype='int64') ** 2)\n result = idx * 1\n tm.assert_index_equal(result, idx)\n\n result = 1 * idx\n tm.assert_index_equal(result, idx)\n\n result = idx / 1\n tm.assert_index_equal(result, idx)\n\n result = idx // 1\n tm.assert_index_equal(result, idx)\n\n result = idx * np.array(5, dtype='int64')\n tm.assert_index_equal(result,\n self._holder(np.arange(5, dtype='int64') * 5))\n\n result = idx * np.arange(5, 
dtype='int64')\n        tm.assert_index_equal(result, didx)\n\n        result = idx * Series(np.arange(5, dtype='int64'))\n        tm.assert_index_equal(result, didx)\n\n        result = idx * Series(np.arange(5, dtype='float64') + 0.1)\n        tm.assert_index_equal(result, self._holder(np.arange(\n            5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))\n\n        # invalid\n        pytest.raises(TypeError, lambda: idx * idx)\n        pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))\n        pytest.raises(ValueError, lambda: idx * np.array([1, 2]))\n\n    def test_pickle_compat_construction(self):\n        pass\n\n    def test_ufunc_coercions(self):\n        # normal ops are also tested in tseries/test_timedeltas.py\n        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],\n                             freq='2H', name='x')\n\n        for result in [idx * 2, np.multiply(idx, 2)]:\n            assert isinstance(result, TimedeltaIndex)\n            exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],\n                                 freq='4H', name='x')\n            tm.assert_index_equal(result, exp)\n            assert result.freq == '4H'\n\n        for result in [idx / 2, np.divide(idx, 2)]:\n            assert isinstance(result, TimedeltaIndex)\n            exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],\n                                 freq='H', name='x')\n            tm.assert_index_equal(result, exp)\n            assert result.freq == 'H'\n\n        idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],\n                             freq='2H', name='x')\n        for result in [-idx, np.negative(idx)]:\n            assert isinstance(result, TimedeltaIndex)\n            exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],\n                                 freq='-2H', name='x')\n            tm.assert_index_equal(result, exp)\n            assert result.freq == '-2H'\n\n        idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],\n                             freq='H', name='x')\n        for result in [abs(idx), np.absolute(idx)]:\n            assert isinstance(result, TimedeltaIndex)\n            exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],\n                                 freq=None, name='x')\n            tm.assert_index_equal(result, exp)\n            assert result.freq is None\n\n    def test_fillna_timedelta(self):\n        # GH 11343\n        idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])\n\n        exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])\n        tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)\n\n        exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])\n        tm.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)\n\n        exp = pd.Index(\n            [pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)\n        tm.assert_index_equal(idx.fillna('x'), exp)\n\n    def test_difference_freq(self):\n        # GH14323: Difference of TimedeltaIndex should not preserve frequency\n\n        index = timedelta_range(\"0 days\", \"5 days\", freq=\"D\")\n\n        other = timedelta_range(\"1 days\", \"4 days\", freq=\"D\")\n        expected = TimedeltaIndex([\"0 days\", \"5 days\"], freq=None)\n        idx_diff = index.difference(other)\n        tm.assert_index_equal(idx_diff, expected)\n        tm.assert_attr_equal('freq', idx_diff, expected)\n\n        other = timedelta_range(\"2 days\", \"5 days\", freq=\"D\")\n        idx_diff = index.difference(other)\n        expected = TimedeltaIndex([\"0 days\", \"1 days\"], freq=None)\n        tm.assert_index_equal(idx_diff, expected)\n        tm.assert_attr_equal('freq', idx_diff, expected)\n\n    def test_take(self):\n\n        tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']\n        idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')\n        expected = TimedeltaIndex(tds, freq=None, name='idx')\n\n        taken1 = idx.take([2, 4, 10])\n        taken2 = idx[[2, 4, 10]]\n\n        for taken in [taken1, taken2]:\n            tm.assert_index_equal(taken, expected)\n            assert isinstance(taken, TimedeltaIndex)\n            assert taken.freq is None\n            assert taken.name == expected.name\n\n    def test_take_fill_value(self):\n        # GH 12631\n        idx = pd.TimedeltaIndex(['1 days', '2 
days', '3 days'],\n name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],\n name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_isin(self):\n\n index = tm.makeTimedeltaIndex(4)\n result = index.isin(index)\n assert result.all()\n\n result = index.isin(list(index))\n assert result.all()\n\n assert_almost_equal(index.isin([index[2], 5]),\n np.array([False, False, True, False]))\n\n def test_factorize(self):\n idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',\n '3 day'])\n\n exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)\n exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])\n\n arr, idx = idx1.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n arr, idx = idx1.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n # freq must be preserved\n idx3 = timedelta_range('1 day', periods=4, freq='s')\n exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)\n arr, idx = idx3.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, idx3)\n\n def test_join_self(self):\n\n index = timedelta_range('1 day', periods=10)\n kinds = 'outer', 'inner', 'left', 'right'\n for kind in kinds:\n joined = index.join(index, how=kind)\n tm.assert_index_equal(index, joined)\n\n def test_slice_keeps_name(self):\n\n # GH4226\n dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')\n assert dr[1:].name == dr.name\n\n def test_does_not_convert_mixed_integer(self):\n df = tm.makeCustomDataframe(10, 10,\n data_gen_f=lambda *args, **kwargs: randn(),\n r_idx_type='i', c_idx_type='td')\n str(df)\n\n cols = df.columns.join(df.index, how='outer')\n joined = cols.join(df.columns)\n assert cols.dtype == np.dtype('O')\n assert cols.dtype == joined.dtype\n tm.assert_index_equal(cols, joined)\n\n def test_sort_values(self):\n\n idx = TimedeltaIndex(['4d', '1d', '2d'])\n\n ordered = idx.sort_values()\n assert ordered.is_monotonic\n\n ordered = idx.sort_values(ascending=False)\n assert ordered[::-1].is_monotonic\n\n ordered, dexer = idx.sort_values(return_indexer=True)\n assert ordered.is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),\n check_dtype=False)\n\n ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)\n assert ordered[::-1].is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),\n check_dtype=False)\n\n def test_get_duplicates(self):\n idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',\n '4day'])\n\n result = idx.get_duplicates()\n ex = TimedeltaIndex(['2 day', '3day'])\n tm.assert_index_equal(result, ex)\n\n def test_argmin_argmax(self):\n idx = TimedeltaIndex(['1 day 
00:00:05', '1 day 00:00:01',\n                              '1 day 00:00:02'])\n        assert idx.argmin() == 1\n        assert idx.argmax() == 0\n\n    def test_misc_coverage(self):\n\n        rng = timedelta_range('1 day', periods=5)\n        result = rng.groupby(rng.days)\n        assert isinstance(list(result.values())[0][0], Timedelta)\n\n        idx = TimedeltaIndex(['3d', '1d', '2d'])\n        assert not idx.equals(list(idx))\n\n        non_td = Index(list('abc'))\n        assert not idx.equals(list(non_td))\n\n    def test_map(self):\n\n        rng = timedelta_range('1 day', periods=10)\n\n        f = lambda x: x.days\n        result = rng.map(f)\n        exp = Int64Index([f(x) for x in rng])\n        tm.assert_index_equal(result, exp)\n\n    def test_comparisons_nat(self):\n\n        tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,\n                                    '1 day 00:00:01', '5 day 00:00:03'])\n        tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,\n                                    '1 day 00:00:02', '5 days 00:00:03'])\n        tdarr = np.array([np.timedelta64(2, 'D'),\n                          np.timedelta64(2, 'D'), np.timedelta64('nat'),\n                          np.timedelta64('nat'),\n                          np.timedelta64(1, 'D') + np.timedelta64(2, 's'),\n                          np.timedelta64(5, 'D') + np.timedelta64(3, 's')])\n\n        if _np_version_under1p8:\n            # cannot test the array case because np.datetime64('nat')\n            # returns today's date\n            cases = [(tdidx1, tdidx2)]\n        else:\n            cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]\n\n        # Check that pd.NaT is handled the same as np.nan\n        for idx1, idx2 in cases:\n\n            result = idx1 < idx2\n            expected = np.array([True, False, False, False, True, False])\n            tm.assert_numpy_array_equal(result, expected)\n\n            result = idx2 > idx1\n            expected = np.array([True, False, False, False, True, False])\n            tm.assert_numpy_array_equal(result, expected)\n\n            result = idx1 <= idx2\n            expected = np.array([True, False, False, False, True, True])\n            tm.assert_numpy_array_equal(result, expected)\n\n            result = idx2 >= idx1\n            expected = np.array([True, False, False, False, True, True])\n            tm.assert_numpy_array_equal(result, expected)\n\n            result = idx1 == idx2\n            expected = np.array([False, False, False, False, False, True])\n            tm.assert_numpy_array_equal(result, expected)\n\n            result = idx1 != idx2\n            expected = np.array([True, True, True, True, True, False])\n            tm.assert_numpy_array_equal(result, expected)\n\n    def test_comparisons_coverage(self):\n        rng = timedelta_range('1 days', periods=10)\n\n        result = rng < rng[3]\n        exp = np.array([True, True, True] + [False] * 7)\n        tm.assert_numpy_array_equal(result, exp)\n\n        # raise TypeError for now\n        pytest.raises(TypeError, rng.__lt__, rng[3].value)\n\n        result = rng == list(rng)\n        exp = rng == rng\n        tm.assert_numpy_array_equal(result, exp)\n\n    def test_total_seconds(self):\n        # GH 10939\n        # test index\n        rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,\n                              freq='s')\n        expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,\n                1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]\n        tm.assert_almost_equal(rng.total_seconds(), Index(expt))\n\n        # test Series\n        s = Series(rng)\n        s_expt = Series(expt, index=[0, 1])\n        tm.assert_series_equal(s.dt.total_seconds(), s_expt)\n\n        # with nat\n        s[1] = np.nan\n        s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +\n                         12 + 100123456. 
/ 1e9, np.nan], index=[0, 1])\n tm.assert_series_equal(s.dt.total_seconds(), s_expt)\n\n # with both nat\n s = Series([np.nan, np.nan], dtype='timedelta64[ns]')\n tm.assert_series_equal(s.dt.total_seconds(),\n Series([np.nan, np.nan], index=[0, 1]))\n\n def test_pass_TimedeltaIndex_to_index(self):\n\n rng = timedelta_range('1 days', '10 days')\n idx = Index(rng, dtype=object)\n\n expected = Index(rng.to_pytimedelta(), dtype=object)\n\n tm.assert_numpy_array_equal(idx.values, expected.values)\n\n def test_pickle(self):\n\n rng = timedelta_range('1 days', periods=10)\n rng_p = tm.round_trip_pickle(rng)\n tm.assert_index_equal(rng, rng_p)\n\n def test_hash_error(self):\n index = timedelta_range('1 days', periods=10)\n with tm.assert_raises_regex(TypeError, \"unhashable type: %r\" %\n type(index).__name__):\n hash(index)\n\n def test_append_join_nondatetimeindex(self):\n rng = timedelta_range('1 days', periods=10)\n idx = Index(['a', 'b', 'c', 'd'])\n\n result = rng.append(idx)\n assert isinstance(result[0], Timedelta)\n\n # it works\n rng.join(idx, how='outer')\n\n def test_append_numpy_bug_1681(self):\n\n td = timedelta_range('1 days', '10 days', freq='2D')\n a = DataFrame()\n c = DataFrame({'A': 'foo', 'B': td}, index=td)\n str(c)\n\n result = a.append(c)\n assert (result['B'] == td).all()\n\n def test_fields(self):\n rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,\n freq='s')\n tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))\n tm.assert_index_equal(\n rng.seconds,\n Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],\n dtype='int64'))\n tm.assert_index_equal(\n rng.microseconds,\n Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))\n tm.assert_index_equal(rng.nanoseconds,\n Index([456, 456], dtype='int64'))\n\n pytest.raises(AttributeError, lambda: rng.hours)\n pytest.raises(AttributeError, lambda: rng.minutes)\n pytest.raises(AttributeError, lambda: rng.milliseconds)\n\n # with nat\n s = Series(rng)\n s[1] = np.nan\n\n tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))\n tm.assert_series_equal(s.dt.seconds, Series(\n [10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))\n\n # preserve name (GH15589)\n rng.name = 'name'\n assert rng.days.name == 'name'\n\n def test_freq_conversion(self):\n\n # doc example\n\n # series\n td = Series(date_range('20130101', periods=4)) - \\\n Series(date_range('20121201', periods=4))\n td[2] += timedelta(minutes=5, seconds=3)\n td[3] = np.nan\n\n result = td / np.timedelta64(1, 'D')\n expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan\n ])\n assert_series_equal(result, expected)\n\n result = td.astype('timedelta64[D]')\n expected = Series([31, 31, 31, np.nan])\n assert_series_equal(result, expected)\n\n result = td / np.timedelta64(1, 's')\n expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,\n np.nan])\n assert_series_equal(result, expected)\n\n result = td.astype('timedelta64[s]')\n assert_series_equal(result, expected)\n\n # tdi\n td = TimedeltaIndex(td)\n\n result = td / np.timedelta64(1, 'D')\n expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])\n assert_index_equal(result, expected)\n\n result = td.astype('timedelta64[D]')\n expected = Index([31, 31, 31, np.nan])\n assert_index_equal(result, expected)\n\n result = td / np.timedelta64(1, 's')\n expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,\n np.nan])\n assert_index_equal(result, expected)\n\n result = td.astype('timedelta64[s]')\n assert_index_equal(result, 
expected)\n\n\nclass TestSlicing(object):\n\n def test_timedelta(self):\n # this is valid too\n index = date_range('1/1/2000', periods=50, freq='B')\n shifted = index + timedelta(1)\n back = shifted + timedelta(-1)\n assert tm.equalContents(index, back)\n assert shifted.freq == index.freq\n assert shifted.freq == back.freq\n\n result = index - timedelta(1)\n expected = index + timedelta(-1)\n tm.assert_index_equal(result, expected)\n\n # GH4134, buggy with timedeltas\n rng = date_range('2013', '2014')\n s = Series(rng)\n result1 = rng - pd.offsets.Hour(1)\n result2 = DatetimeIndex(s - np.timedelta64(100000000))\n result3 = rng - np.timedelta64(100000000)\n result4 = DatetimeIndex(s - pd.offsets.Hour(1))\n tm.assert_index_equal(result1, result4)\n tm.assert_index_equal(result2, result3)\n\n\nclass TestTimeSeries(object):\n _multiprocess_can_split_ = True\n\n def test_series_box_timedelta(self):\n rng = timedelta_range('1 day 1 s', periods=5, freq='h')\n s = Series(rng)\n assert isinstance(s[1], Timedelta)\n assert isinstance(s.iat[2], Timedelta)\n",
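"# Editor's note: hedged doc-style example of the timedelta conversions the\n# tests above rely on: dividing by a np.timedelta64 unit yields floats\n# (mirroring test_freq_conversion) and total_seconds() reports seconds.\nimport numpy as np\nimport pandas as pd\n\ntd = pd.to_timedelta(['1 days', '12 hours'])\nprint(td / np.timedelta64(1, 'D'))   # [1.0, 0.5]\nprint(td.total_seconds())            # [86400.0, 43200.0]\n",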
"\"\"\"Tests for polynomial module.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nfrom numpy.testing import (\n TestCase, assert_almost_equal, assert_raises,\n assert_equal, assert_, run_module_suite)\n\n\ndef trim(x):\n return poly.polytrim(x, tol=1e-6)\n\nT0 = [1]\nT1 = [0, 1]\nT2 = [-1, 0, 2]\nT3 = [0, -3, 0, 4]\nT4 = [1, 0, -8, 0, 8]\nT5 = [0, 5, 0, -20, 0, 16]\nT6 = [-1, 0, 18, 0, -48, 0, 32]\nT7 = [0, -7, 0, 56, 0, -112, 0, 64]\nT8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]\nT9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]\n\nTlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]\n\n\nclass TestConstants(TestCase):\n\n def test_polydomain(self):\n assert_equal(poly.polydomain, [-1, 1])\n\n def test_polyzero(self):\n assert_equal(poly.polyzero, [0])\n\n def test_polyone(self):\n assert_equal(poly.polyone, [1])\n\n def test_polyx(self):\n assert_equal(poly.polyx, [0, 1])\n\n\nclass TestArithmetic(TestCase):\n\n def test_polyadd(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = poly.polyadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polysub(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(max(i, j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = poly.polysub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polymulx(self):\n assert_equal(poly.polymulx([0]), [0])\n assert_equal(poly.polymulx([1]), [0, 1])\n for i in range(1, 5):\n ser = [0]*i + [1]\n tgt = [0]*(i + 1) + [1]\n assert_equal(poly.polymulx(ser), tgt)\n\n def test_polymul(self):\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n tgt = np.zeros(i + j + 1)\n tgt[i + j] += 1\n res = poly.polymul([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_polydiv(self):\n # check zero division\n assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])\n\n # check scalar division\n quo, rem = poly.polydiv([2], [2])\n assert_equal((quo, rem), (1, 0))\n quo, rem = poly.polydiv([2, 2], [2])\n assert_equal((quo, rem), ((1, 1), 0))\n\n # check rest.\n for i in range(5):\n for j in range(5):\n msg = \"At i=%d, j=%d\" % (i, j)\n ci = [0]*i + [1, 2]\n cj = [0]*j + [1, 2]\n tgt = poly.polyadd(ci, cj)\n quo, rem = poly.polydiv(tgt, ci)\n res = poly.polyadd(poly.polymul(quo, ci), rem)\n assert_equal(res, tgt, err_msg=msg)\n\n\nclass TestEvaluation(TestCase):\n # coefficients of 1 + 2*x + 3*x**2\n c1d = np.array([1., 2., 3.])\n c2d = np.einsum('i,j->ij', c1d, c1d)\n c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)\n\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n y = poly.polyval(x, [1., 2., 3.])\n\n def test_polyval(self):\n #check empty input\n assert_equal(poly.polyval([], [1]).size, 0)\n\n #check normal input)\n x = np.linspace(-1, 1)\n y = [x**i for i in range(5)]\n for i in range(5):\n tgt = y[i]\n res = poly.polyval(x, [0]*i + [1])\n assert_almost_equal(res, tgt)\n tgt = x*(x**2 - 1)\n res = poly.polyval(x, [0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n #check that shape is preserved\n for i in range(3):\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(poly.polyval(x, [1]).shape, dims)\n assert_equal(poly.polyval(x, [1, 0]).shape, dims)\n assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)\n\n def test_polyvalfromroots(self):\n # check 
exception for broadcasting x values over root array with\n        # too few dimensions\n        assert_raises(ValueError, poly.polyvalfromroots,\n                      [1], [1], tensor=False)\n\n        # check empty input\n        assert_equal(poly.polyvalfromroots([], [1]).size, 0)\n        assert_(poly.polyvalfromroots([], [1]).shape == (0,))\n\n        # check empty input + multidimensional roots\n        assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)\n        assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))\n\n        # check scalar input\n        assert_equal(poly.polyvalfromroots(1, 1), 0)\n        assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,))\n\n        # check normal input\n        x = np.linspace(-1, 1)\n        y = [x**i for i in range(5)]\n        for i in range(1, 5):\n            tgt = y[i]\n            res = poly.polyvalfromroots(x, [0]*i)\n            assert_almost_equal(res, tgt)\n        tgt = x*(x - 1)*(x + 1)\n        res = poly.polyvalfromroots(x, [-1, 0, 1])\n        assert_almost_equal(res, tgt)\n\n        # check that shape is preserved\n        for i in range(3):\n            dims = [2]*i\n            x = np.zeros(dims)\n            assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)\n            assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)\n            assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)\n\n        # check compatibility with factorization\n        ptest = [15, 2, -16, -2, 1]\n        r = poly.polyroots(ptest)\n        x = np.linspace(-1, 1)\n        assert_almost_equal(poly.polyval(x, ptest),\n                            poly.polyvalfromroots(x, r))\n\n        # check multidimensional arrays of roots and values\n        # check tensor=False\n        rshape = (3, 5)\n        x = np.arange(-3, 2)\n        r = np.random.randint(-5, 5, size=rshape)\n        res = poly.polyvalfromroots(x, r, tensor=False)\n        tgt = np.empty(r.shape[1:])\n        for ii in range(tgt.size):\n            tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])\n        assert_equal(res, tgt)\n\n        # check tensor=True\n        x = np.vstack([x, 2*x])\n        res = poly.polyvalfromroots(x, r, tensor=True)\n        tgt = np.empty(r.shape[1:] + x.shape)\n        for ii in range(r.shape[1]):\n            for jj in range(x.shape[0]):\n                tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])\n        assert_equal(res, tgt)\n\n    def test_polyval2d(self):\n        x1, x2, x3 = self.x\n        y1, y2, y3 = self.y\n\n        #test exceptions\n        assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)\n\n        #test values\n        tgt = y1*y2\n        res = poly.polyval2d(x1, x2, self.c2d)\n        assert_almost_equal(res, tgt)\n\n        #test shape\n        z = np.ones((2, 3))\n        res = poly.polyval2d(z, z, self.c2d)\n        assert_(res.shape == (2, 3))\n\n    def test_polyval3d(self):\n        x1, x2, x3 = self.x\n        y1, y2, y3 = self.y\n\n        #test exceptions\n        assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)\n\n        #test values\n        tgt = y1*y2*y3\n        res = poly.polyval3d(x1, x2, x3, self.c3d)\n        assert_almost_equal(res, tgt)\n\n        #test shape\n        z = np.ones((2, 3))\n        res = poly.polyval3d(z, z, z, self.c3d)\n        assert_(res.shape == (2, 3))\n\n    def test_polygrid2d(self):\n        x1, x2, x3 = self.x\n        y1, y2, y3 = self.y\n\n        #test values\n        tgt = np.einsum('i,j->ij', y1, y2)\n        res = poly.polygrid2d(x1, x2, self.c2d)\n        assert_almost_equal(res, tgt)\n\n        #test shape\n        z = np.ones((2, 3))\n        res = poly.polygrid2d(z, z, self.c2d)\n        assert_(res.shape == (2, 3)*2)\n\n    def test_polygrid3d(self):\n        x1, x2, x3 = self.x\n        y1, y2, y3 = self.y\n\n        #test values\n        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)\n        res = poly.polygrid3d(x1, x2, x3, self.c3d)\n        assert_almost_equal(res, tgt)\n\n        #test shape\n        z = np.ones((2, 3))\n        res = poly.polygrid3d(z, z, z, self.c3d)\n        assert_(res.shape == (2, 3)*3)\n\n\nclass TestIntegral(TestCase):\n\n    def test_polyint(self):\n        # check exceptions\n        assert_raises(ValueError, poly.polyint, [0], .5)\n        
assert_raises(ValueError, poly.polyint, [0], -1)\n assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = poly.polyint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n res = poly.polyint(pol, m=1, k=[i])\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n res = poly.polyint(pol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(poly.polyval(-1, res), i)\n\n # check single integration with integration constant and scaling\n for i in range(5):\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n res = poly.polyint(pol, m=1, k=[i], scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1)\n res = poly.polyint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k])\n res = poly.polyint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)\n res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5):\n for j in range(2, 5):\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j):\n tgt = poly.polyint(tgt, m=1, k=[k], scl=2)\n res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyint_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T\n res = poly.polyint(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([poly.polyint(c) for c in c2d])\n res = poly.polyint(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])\n res = poly.polyint(c2d, k=3, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestDerivative(TestCase):\n\n def test_polyder(self):\n # check exceptions\n assert_raises(ValueError, poly.polyder, [0], .5)\n assert_raises(ValueError, poly.polyder, [0], -1)\n\n # check that zeroth derivative does nothing\n for i in range(5):\n tgt = [0]*i + [1]\n res = poly.polyder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = poly.polyder(poly.polyint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5):\n for j in range(2, 5):\n tgt = [0]*i + [1]\n res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyder_axis(self):\n # check that axis keyword works\n c2d = np.random.random((3, 4))\n\n tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T\n res = poly.polyder(c2d, axis=0)\n assert_almost_equal(res, tgt)\n\n tgt = 
np.vstack([poly.polyder(c) for c in c2d])\n res = poly.polyder(c2d, axis=1)\n assert_almost_equal(res, tgt)\n\n\nclass TestVander(TestCase):\n # some random values in [-1, 1)\n x = np.random.random((3, 5))*2 - 1\n\n def test_polyvander(self):\n # check for 1d x\n x = np.arange(3)\n v = poly.polyvander(x, 3)\n assert_(v.shape == (3, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], poly.polyval(x, coef))\n\n # check for 2d x\n x = np.array([[1, 2], [3, 4], [5, 6]])\n v = poly.polyvander(x, 3)\n assert_(v.shape == (3, 2, 4))\n for i in range(4):\n coef = [0]*i + [1]\n assert_almost_equal(v[..., i], poly.polyval(x, coef))\n\n def test_polyvander2d(self):\n # also tests polyval2d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3))\n van = poly.polyvander2d(x1, x2, [1, 2])\n tgt = poly.polyval2d(x1, x2, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = poly.polyvander2d([x1], [x2], [1, 2])\n assert_(van.shape == (1, 5, 6))\n\n def test_polyvander3d(self):\n # also tests polyval3d for non-square coefficient array\n x1, x2, x3 = self.x\n c = np.random.random((2, 3, 4))\n van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])\n tgt = poly.polyval3d(x1, x2, x3, c)\n res = np.dot(van, c.flat)\n assert_almost_equal(res, tgt)\n\n # check shape\n van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])\n assert_(van.shape == (1, 5, 24))\n\n\nclass TestCompanion(TestCase):\n\n def test_raises(self):\n assert_raises(ValueError, poly.polycompanion, [])\n assert_raises(ValueError, poly.polycompanion, [1])\n\n def test_dimensions(self):\n for i in range(1, 5):\n coef = [0]*i + [1]\n assert_(poly.polycompanion(coef).shape == (i, i))\n\n def test_linear_root(self):\n assert_(poly.polycompanion([1, 2])[0, 0] == -.5)\n\n\nclass TestMisc(TestCase):\n\n def test_polyfromroots(self):\n res = poly.polyfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1, 5):\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = Tlist[i]\n res = poly.polyfromroots(roots)*2**(i-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyroots(self):\n assert_almost_equal(poly.polyroots([1]), [])\n assert_almost_equal(poly.polyroots([1, 2]), [-.5])\n for i in range(2, 5):\n tgt = np.linspace(-1, 1, i)\n res = poly.polyroots(poly.polyfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_polyfit(self):\n def f(x):\n return x*(x - 1)*(x - 2)\n\n def f2(x):\n return x**4 + x**2 + 1\n\n # Test exceptions\n assert_raises(ValueError, poly.polyfit, [1], [1], -1)\n assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)\n assert_raises(TypeError, poly.polyfit, [], [1], 0)\n assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)\n assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)\n assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)\n assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])\n assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])\n assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])\n assert_raises(TypeError, poly.polyfit, [1], [1], [])\n\n # Test fit\n x = np.linspace(0, 2)\n y = f(x)\n #\n coef3 = poly.polyfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(poly.polyval(x, coef3), y)\n coef3 = poly.polyfit(x, y, [0, 1, 2, 3])\n assert_equal(len(coef3), 4)\n assert_almost_equal(poly.polyval(x, coef3), y)\n #\n coef4 = poly.polyfit(x, y, 4)\n assert_equal(len(coef4), 5)\n 
assert_almost_equal(poly.polyval(x, coef4), y)\n        coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])\n        assert_equal(len(coef4), 5)\n        assert_almost_equal(poly.polyval(x, coef4), y)\n        #\n        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)\n        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])\n        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)\n        # test weighting\n        w = np.zeros_like(x)\n        yw = y.copy()\n        w[1::2] = 1\n        yw[0::2] = 0\n        wcoef3 = poly.polyfit(x, yw, 3, w=w)\n        assert_almost_equal(wcoef3, coef3)\n        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)\n        assert_almost_equal(wcoef3, coef3)\n        #\n        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)\n        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)\n        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)\n        # test scaling with complex values: x points whose squares\n        # sum to zero\n        x = [1, 1j, -1, -1j]\n        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])\n        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])\n        # test fitting only even polynomials\n        x = np.linspace(-1, 1)\n        y = f2(x)\n        coef1 = poly.polyfit(x, y, 4)\n        assert_almost_equal(poly.polyval(x, coef1), y)\n        coef2 = poly.polyfit(x, y, [0, 2, 4])\n        assert_almost_equal(poly.polyval(x, coef2), y)\n        assert_almost_equal(coef1, coef2)\n\n    def test_polytrim(self):\n        coef = [2, -1, 1, 0]\n\n        # Test exceptions\n        assert_raises(ValueError, poly.polytrim, coef, -1)\n\n        # Test results\n        assert_equal(poly.polytrim(coef), coef[:-1])\n        assert_equal(poly.polytrim(coef, 1), coef[:-3])\n        assert_equal(poly.polytrim(coef, 2), [0])\n\n    def test_polyline(self):\n        assert_equal(poly.polyline(3, 4), [3, 4])\n\n\nif __name__ == \"__main__\":\n    run_module_suite()\n",
"from __future__ import print_function, division, absolute_import\n\nfrom timeit import default_timer as timer\nimport numpy as np\n\nfrom numba import unittest_support as unittest\nfrom numba import hsa, float32\n\n\nclass TestMatMul(unittest.TestCase):\n def test_matmul_naive(self):\n @hsa.jit\n def matmul(A, B, C):\n i = hsa.get_global_id(0)\n j = hsa.get_global_id(1)\n\n if i >= C.shape[0] or j >= C.shape[1]:\n return\n\n tmp = 0\n\n for k in range(A.shape[1]):\n tmp += A[i, k] * B[k, j]\n\n C[i, j] = tmp\n\n N = 256\n A = np.random.random((N, N)).astype(np.float32)\n B = np.random.random((N, N)).astype(np.float32)\n C = np.zeros_like(A)\n\n with hsa.register(A, B, C):\n ts = timer()\n matmul[(N // 16, N // 16), (16, 16)](A, B, C)\n te = timer()\n print(\"1st GPU time:\", te - ts)\n\n with hsa.register(A, B, C):\n ts = timer()\n matmul[(N // 16, N // 16), (16, 16)](A, B, C)\n te = timer()\n print(\"2nd GPU time:\", te - ts)\n\n ts = timer()\n ans = np.dot(A, B)\n te = timer()\n print(\"CPU time:\", te - ts)\n np.testing.assert_allclose(ans, C, rtol=1e-5)\n\n def test_matmul_fast(self):\n blocksize = 20\n gridsize = 20\n\n @hsa.jit\n def matmulfast(A, B, C):\n x = hsa.get_global_id(0)\n y = hsa.get_global_id(1)\n\n tx = hsa.get_local_id(0)\n ty = hsa.get_local_id(1)\n\n sA = hsa.shared.array(shape=(blocksize, blocksize), dtype=float32)\n sB = hsa.shared.array(shape=(blocksize, blocksize), dtype=float32)\n\n if x >= C.shape[0] or y >= C.shape[1]:\n return\n\n tmp = 0\n\n for i in range(gridsize):\n # preload\n sA[tx, ty] = A[x, ty + i * blocksize]\n sB[tx, ty] = B[tx + i * blocksize, y]\n # wait for preload to end\n hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)\n # compute loop\n for j in range(blocksize):\n tmp += sA[tx, j] * sB[j, ty]\n # wait for compute to end\n hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)\n\n C[x, y] = tmp\n\n N = gridsize * blocksize\n A = np.random.random((N, N)).astype(np.float32)\n B = np.random.random((N, N)).astype(np.float32)\n C = np.zeros_like(A)\n\n griddim = gridsize, gridsize\n blockdim = blocksize, blocksize\n\n with hsa.register(A, B, C):\n ts = timer()\n matmulfast[griddim, blockdim](A, B, C)\n te = timer()\n print(\"1st GPU time:\", te - ts)\n\n with hsa.register(A, B, C):\n ts = timer()\n matmulfast[griddim, blockdim](A, B, C)\n te = timer()\n print(\"2nd GPU time:\", te - ts)\n\n ts = timer()\n ans = np.dot(A, B)\n te = timer()\n print(\"CPU time:\", te - ts)\n np.testing.assert_allclose(ans, C, rtol=1e-5)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"\nThese the test the public routines exposed in types/common.py\nrelated to inference and not otherwise tested in types/test_common.py\n\n\"\"\"\nfrom warnings import catch_warnings\nimport collections\nimport re\nfrom datetime import datetime, date, timedelta, time\nimport numpy as np\nimport pytz\nimport pytest\n\nimport pandas as pd\nfrom pandas._libs import tslib, lib\nfrom pandas import (Series, Index, DataFrame, Timedelta,\n DatetimeIndex, TimedeltaIndex, Timestamp,\n Panel, Period, Categorical)\nfrom pandas.compat import u, PY2, PY3, StringIO, lrange\nfrom pandas.core.dtypes import inference\nfrom pandas.core.dtypes.common import (\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_number,\n is_integer,\n is_float,\n is_bool,\n is_scalar,\n is_scipy_sparse,\n _ensure_int32,\n _ensure_categorical)\nfrom pandas.core.dtypes.missing import isnull\nfrom pandas.util import testing as tm\n\n\ndef test_is_sequence():\n is_seq = inference.is_sequence\n assert (is_seq((1, 2)))\n assert (is_seq([1, 2]))\n assert (not is_seq(\"abcd\"))\n assert (not is_seq(u(\"abcd\")))\n assert (not is_seq(np.int64))\n\n class A(object):\n\n def __getitem__(self):\n return 1\n\n assert (not is_seq(A()))\n\n\ndef test_is_list_like():\n passes = ([], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),\n Series([]), Series(['a']).str)\n fails = (1, '2', object())\n\n for p in passes:\n assert inference.is_list_like(p)\n\n for f in fails:\n assert not inference.is_list_like(f)\n\n\[email protected]('inner', [\n [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),\n Series([]), Series(['a']).str, (x for x in range(5))\n])\[email protected]('outer', [\n list, Series, np.array, tuple\n])\ndef test_is_nested_list_like_passes(inner, outer):\n result = outer([inner for _ in range(5)])\n assert inference.is_list_like(result)\n\n\[email protected]('obj', [\n 'abc', [], [1], (1,), ['a'], 'a', {'a'},\n [1, 2, 3], Series([1]), DataFrame({\"A\": [1]}),\n ([1, 2] for _ in range(5)),\n])\ndef test_is_nested_list_like_fails(obj):\n assert not inference.is_nested_list_like(obj)\n\n\ndef test_is_dict_like():\n passes = [{}, {'A': 1}, Series([1])]\n fails = ['1', 1, [1, 2], (1, 2), range(2), Index([1])]\n\n for p in passes:\n assert inference.is_dict_like(p)\n\n for f in fails:\n assert not inference.is_dict_like(f)\n\n\ndef test_is_file_like():\n class MockFile(object):\n pass\n\n is_file = inference.is_file_like\n\n data = StringIO(\"data\")\n assert is_file(data)\n\n # No read / write attributes\n # No iterator attributes\n m = MockFile()\n assert not is_file(m)\n\n MockFile.write = lambda self: 0\n\n # Write attribute but not an iterator\n m = MockFile()\n assert not is_file(m)\n\n # gh-16530: Valid iterator just means we have the\n # __iter__ attribute for our purposes.\n MockFile.__iter__ = lambda self: self\n\n # Valid write-only file\n m = MockFile()\n assert is_file(m)\n\n del MockFile.write\n MockFile.read = lambda self: 0\n\n # Valid read-only file\n m = MockFile()\n assert is_file(m)\n\n # Iterator but no read / write attributes\n data = [1, 2, 3]\n assert not is_file(data)\n\n if PY3:\n from unittest import mock\n assert not is_file(mock.Mock())\n\n\ndef test_is_named_tuple():\n passes = (collections.namedtuple('Test', list('abc'))(1, 2, 3), )\n fails = ((1, 2, 3), 'a', Series({'pi': 3.14}))\n\n for p in passes:\n assert inference.is_named_tuple(p)\n\n for f in 
fails:\n assert not inference.is_named_tuple(f)\n\n\ndef test_is_hashable():\n\n # all new-style classes are hashable by default\n class HashableClass(object):\n pass\n\n class UnhashableClass1(object):\n __hash__ = None\n\n class UnhashableClass2(object):\n\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n hashable = (1,\n 3.14,\n np.float64(3.14),\n 'a',\n tuple(),\n (1, ),\n HashableClass(), )\n not_hashable = ([], UnhashableClass1(), )\n abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )\n\n for i in hashable:\n assert inference.is_hashable(i)\n for i in not_hashable:\n assert not inference.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not inference.is_hashable(i)\n\n # numpy.array is no longer collections.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # is_hashable()\n assert not inference.is_hashable(np.array([]))\n\n # old-style classes in Python 2 don't appear hashable to\n # collections.Hashable but also seem to support hash() by default\n if PY2:\n\n class OldStyleClass():\n pass\n\n c = OldStyleClass()\n assert not isinstance(c, collections.Hashable)\n assert inference.is_hashable(c)\n hash(c) # this will not raise\n\n\ndef test_is_re():\n passes = re.compile('ad'),\n fails = 'x', 2, 3, object()\n\n for p in passes:\n assert inference.is_re(p)\n\n for f in fails:\n assert not inference.is_re(f)\n\n\ndef test_is_recompilable():\n passes = (r'a', u('x'), r'asdf', re.compile('adsf'), u(r'\\u2233\\s*'),\n re.compile(r''))\n fails = 1, [], object()\n\n for p in passes:\n assert inference.is_re_compilable(p)\n\n for f in fails:\n assert not inference.is_re_compilable(f)\n\n\nclass TestInference(object):\n\n def test_infer_dtype_bytes(self):\n compare = 'string' if PY2 else 'bytes'\n\n # string array of bytes\n arr = np.array(list('abc'), dtype='S1')\n assert lib.infer_dtype(arr) == compare\n\n # object array of bytes\n arr = arr.astype(object)\n assert lib.infer_dtype(arr) == compare\n\n def test_isinf_scalar(self):\n # GH 11352\n assert lib.isposinf_scalar(float('inf'))\n assert lib.isposinf_scalar(np.inf)\n assert not lib.isposinf_scalar(-np.inf)\n assert not lib.isposinf_scalar(1)\n assert not lib.isposinf_scalar('a')\n\n assert lib.isneginf_scalar(float('-inf'))\n assert lib.isneginf_scalar(-np.inf)\n assert not lib.isneginf_scalar(np.inf)\n assert not lib.isneginf_scalar(1)\n assert not lib.isneginf_scalar('a')\n\n def test_maybe_convert_numeric_infinities(self):\n # see gh-13274\n infinities = ['inf', 'inF', 'iNf', 'Inf',\n 'iNF', 'InF', 'INf', 'INF']\n na_values = set(['', 'NULL', 'nan'])\n\n pos = np.array(['inf'], dtype=np.float64)\n neg = np.array(['-inf'], dtype=np.float64)\n\n msg = \"Unable to parse string\"\n\n for infinity in infinities:\n for maybe_int in (True, False):\n out = lib.maybe_convert_numeric(\n np.array([infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n out = lib.maybe_convert_numeric(\n np.array(['-' + infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, neg)\n\n out = lib.maybe_convert_numeric(\n np.array([u(infinity)], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n out = lib.maybe_convert_numeric(\n np.array(['+' + infinity], dtype=object),\n na_values, maybe_int)\n tm.assert_numpy_array_equal(out, pos)\n\n # too many characters\n with tm.assert_raises_regex(ValueError, msg):\n lib.maybe_convert_numeric(\n np.array(['foo_' + infinity], dtype=object),\n na_values, 
maybe_int)\n\n    def test_maybe_convert_numeric_post_floatify_nan(self):\n        # see gh-13314\n        data = np.array(['1.200', '-999.000', '4.500'], dtype=object)\n        expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)\n        nan_values = set([-999, -999.0])\n\n        for coerce_type in (True, False):\n            out = lib.maybe_convert_numeric(data, nan_values, coerce_type)\n            tm.assert_numpy_array_equal(out, expected)\n\n    def test_convert_infs(self):\n        arr = np.array(['inf', 'inf', 'inf'], dtype='O')\n        result = lib.maybe_convert_numeric(arr, set(), False)\n        assert result.dtype == np.float64\n\n        arr = np.array(['-inf', '-inf', '-inf'], dtype='O')\n        result = lib.maybe_convert_numeric(arr, set(), False)\n        assert result.dtype == np.float64\n\n    def test_scientific_no_exponent(self):\n        # See PR 12215\n        arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')\n        result = lib.maybe_convert_numeric(arr, set(), False, True)\n        assert np.all(np.isnan(result))\n\n    def test_convert_non_hashable(self):\n        # GH13324\n        # make sure that we are handling non-hashables\n        arr = np.array([[10.0, 2], 1.0, 'apple'])\n        result = lib.maybe_convert_numeric(arr, set(), False, True)\n        tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))\n\n    def test_convert_numeric_uint64(self):\n        arr = np.array([2**63], dtype=object)\n        exp = np.array([2**63], dtype=np.uint64)\n        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n        arr = np.array([str(2**63)], dtype=object)\n        exp = np.array([2**63], dtype=np.uint64)\n        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n        arr = np.array([np.uint64(2**63)], dtype=object)\n        exp = np.array([2**63], dtype=np.uint64)\n        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)\n\n    def test_convert_numeric_uint64_nan(self):\n        msg = 'uint64 array detected'\n        cases = [(np.array([2**63, np.nan], dtype=object), set()),\n                 (np.array([str(2**63), np.nan], dtype=object), set()),\n                 (np.array([np.nan, 2**63], dtype=object), set()),\n                 (np.array([np.nan, str(2**63)], dtype=object), set()),\n                 (np.array([2**63, 2**63 + 1], dtype=object), set([2**63])),\n                 (np.array([str(2**63), str(2**63 + 1)],\n                           dtype=object), set([2**63]))]\n\n        for coerce in (True, False):\n            for arr, na_values in cases:\n                if coerce:\n                    with tm.assert_raises_regex(ValueError, msg):\n                        lib.maybe_convert_numeric(arr, na_values,\n                                                  coerce_numeric=coerce)\n                else:\n                    tm.assert_numpy_array_equal(lib.maybe_convert_numeric(\n                        arr, na_values), arr)\n\n    def test_convert_numeric_int64_uint64(self):\n        msg = 'uint64 and negative values detected'\n        cases = [np.array([2**63, -1], dtype=object),\n                 np.array([str(2**63), -1], dtype=object),\n                 np.array([str(2**63), str(-1)], dtype=object),\n                 np.array([-1, 2**63], dtype=object),\n                 np.array([-1, str(2**63)], dtype=object),\n                 np.array([str(-1), str(2**63)], dtype=object)]\n\n        for coerce in (True, False):\n            for case in cases:\n                if coerce:\n                    with tm.assert_raises_regex(ValueError, msg):\n                        lib.maybe_convert_numeric(case, set(),\n                                                  coerce_numeric=coerce)\n                else:\n                    tm.assert_numpy_array_equal(lib.maybe_convert_numeric(\n                        case, set()), case)\n\n    def test_maybe_convert_objects_uint64(self):\n        # see gh-4471\n        arr = np.array([2**63], dtype=object)\n        exp = np.array([2**63], dtype=np.uint64)\n        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n        # NumPy bug: can't compare uint64 to int64, as that\n        # results in both casting to float64, so we should\n        # make sure that this function is robust against it\n        arr = np.array([np.uint64(2**63)], dtype=object)\n        exp = 
np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2, -1], dtype=object)\n exp = np.array([2, -1], dtype=np.int64)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n arr = np.array([2**63, -1], dtype=object)\n exp = np.array([2**63, -1], dtype=object)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n def test_mixed_dtypes_remain_object_array(self):\n # GH14956\n array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],\n dtype=object)\n result = lib.maybe_convert_objects(array, convert_datetime=1)\n tm.assert_numpy_array_equal(result, array)\n\n\nclass TestTypeInference(object):\n\n def test_length_zero(self):\n result = lib.infer_dtype(np.array([], dtype='i4'))\n assert result == 'integer'\n\n result = lib.infer_dtype([])\n assert result == 'empty'\n\n def test_integers(self):\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'integer'\n\n arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'mixed-integer'\n\n arr = np.array([1, 2, 3, 4, 5], dtype='i4')\n result = lib.infer_dtype(arr)\n assert result == 'integer'\n\n def test_bools(self):\n arr = np.array([True, False, True, True, True], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'boolean'\n\n arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'boolean'\n\n arr = np.array([True, False, True, 'foo'], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'mixed'\n\n arr = np.array([True, False, True], dtype=bool)\n result = lib.infer_dtype(arr)\n assert result == 'boolean'\n\n def test_floats(self):\n arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'floating'\n\n arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],\n dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'mixed-integer'\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f4')\n result = lib.infer_dtype(arr)\n assert result == 'floating'\n\n arr = np.array([1, 2, 3, 4, 5], dtype='f8')\n result = lib.infer_dtype(arr)\n assert result == 'floating'\n\n def test_string(self):\n pass\n\n def test_unicode(self):\n pass\n\n def test_datetime(self):\n\n dates = [datetime(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n assert index.inferred_type == 'datetime64'\n\n def test_infer_dtype_datetime(self):\n\n arr = np.array([Timestamp('2011-01-01'),\n Timestamp('2011-01-02')])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([np.datetime64('2011-01-01'),\n np.datetime64('2011-01-01')], dtype=object)\n assert lib.infer_dtype(arr) == 'datetime64'\n\n arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])\n assert lib.infer_dtype(arr) == 'datetime'\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, pd.Timestamp('2011-01-02')])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([n, np.datetime64('2011-01-02')])\n assert lib.infer_dtype(arr) == 'datetime64'\n\n arr = np.array([n, datetime(2011, 1, 1)])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([n, pd.Timestamp('2011-01-02'), n])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([n, np.datetime64('2011-01-02'), n])\n assert lib.infer_dtype(arr) == 'datetime64'\n\n arr = np.array([n, datetime(2011, 1, 1), n])\n assert 
lib.infer_dtype(arr) == 'datetime'\n\n # different type of nat\n arr = np.array([np.timedelta64('nat'),\n np.datetime64('2011-01-02')], dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([np.datetime64('2011-01-02'),\n np.timedelta64('nat')], dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n # mixed datetime\n arr = np.array([datetime(2011, 1, 1),\n pd.Timestamp('2011-01-02')])\n assert lib.infer_dtype(arr) == 'datetime'\n\n # should be datetime?\n arr = np.array([np.datetime64('2011-01-01'),\n pd.Timestamp('2011-01-02')])\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([pd.Timestamp('2011-01-02'),\n np.datetime64('2011-01-01')])\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])\n assert lib.infer_dtype(arr) == 'mixed-integer'\n\n arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])\n assert lib.infer_dtype(arr) == 'mixed'\n\n def test_infer_dtype_timedelta(self):\n\n arr = np.array([pd.Timedelta('1 days'),\n pd.Timedelta('2 days')])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([np.timedelta64(1, 'D'),\n np.timedelta64(2, 'D')], dtype=object)\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([timedelta(1), timedelta(2)])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, Timedelta('1 days')])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([n, np.timedelta64(1, 'D')])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([n, timedelta(1)])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([n, pd.Timedelta('1 days'), n])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([n, np.timedelta64(1, 'D'), n])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([n, timedelta(1), n])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n # different type of nat\n arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],\n dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],\n dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n def test_infer_dtype_period(self):\n # GH 13664\n arr = np.array([pd.Period('2011-01', freq='D'),\n pd.Period('2011-02', freq='D')])\n assert lib.infer_dtype(arr) == 'period'\n\n arr = np.array([pd.Period('2011-01', freq='D'),\n pd.Period('2011-02', freq='M')])\n assert lib.infer_dtype(arr) == 'period'\n\n # starts with nan\n for n in [pd.NaT, np.nan]:\n arr = np.array([n, pd.Period('2011-01', freq='D')])\n assert lib.infer_dtype(arr) == 'period'\n\n arr = np.array([n, pd.Period('2011-01', freq='D'), n])\n assert lib.infer_dtype(arr) == 'period'\n\n # different type of nat\n arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],\n dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],\n dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n def test_infer_dtype_all_nan_nat_like(self):\n arr = np.array([np.nan, np.nan])\n assert lib.infer_dtype(arr) == 'floating'\n\n # nan and None mix are result in mixed\n arr = np.array([np.nan, np.nan, None])\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([None, np.nan, np.nan])\n assert lib.infer_dtype(arr) == 'mixed'\n\n # pd.NaT\n arr = np.array([pd.NaT])\n assert lib.infer_dtype(arr) == 
'datetime'\n\n arr = np.array([pd.NaT, np.nan])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([np.nan, pd.NaT])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([np.nan, pd.NaT, np.nan])\n assert lib.infer_dtype(arr) == 'datetime'\n\n arr = np.array([None, pd.NaT, None])\n assert lib.infer_dtype(arr) == 'datetime'\n\n # np.datetime64(nat)\n arr = np.array([np.datetime64('nat')])\n assert lib.infer_dtype(arr) == 'datetime64'\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.datetime64('nat'), n])\n assert lib.infer_dtype(arr) == 'datetime64'\n\n arr = np.array([pd.NaT, n, np.datetime64('nat'), n])\n assert lib.infer_dtype(arr) == 'datetime64'\n\n arr = np.array([np.timedelta64('nat')], dtype=object)\n assert lib.infer_dtype(arr) == 'timedelta'\n\n for n in [np.nan, pd.NaT, None]:\n arr = np.array([n, np.timedelta64('nat'), n])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])\n assert lib.infer_dtype(arr) == 'timedelta'\n\n # datetime / timedelta mixed\n arr = np.array([pd.NaT, np.datetime64('nat'),\n np.timedelta64('nat'), np.nan])\n assert lib.infer_dtype(arr) == 'mixed'\n\n arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],\n dtype=object)\n assert lib.infer_dtype(arr) == 'mixed'\n\n def test_is_datetimelike_array_all_nan_nat_like(self):\n arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])\n assert lib.is_datetime_array(arr)\n assert lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_array(arr)\n assert not lib.is_timedelta64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert lib.is_timedelta_array(arr)\n assert lib.is_timedelta64_array(arr)\n assert lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),\n np.timedelta64('nat')])\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_array(arr)\n assert not lib.is_timedelta64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, pd.NaT])\n assert lib.is_datetime_array(arr)\n assert lib.is_datetime64_array(arr)\n assert lib.is_timedelta_array(arr)\n assert lib.is_timedelta64_array(arr)\n assert lib.is_timedelta_or_timedelta64_array(arr)\n\n arr = np.array([np.nan, np.nan], dtype=object)\n assert not lib.is_datetime_array(arr)\n assert not lib.is_datetime64_array(arr)\n assert not lib.is_timedelta_array(arr)\n assert not lib.is_timedelta64_array(arr)\n assert not lib.is_timedelta_or_timedelta64_array(arr)\n\n def test_date(self):\n\n dates = [date(2012, 1, x) for x in range(1, 20)]\n index = Index(dates)\n assert index.inferred_type == 'date'\n\n def test_to_object_array_tuples(self):\n r = (5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values)\n\n try:\n # make sure record array works\n from collections import namedtuple\n record = namedtuple('record', 'x y')\n r = record(5, 6)\n values = [r]\n result = lib.to_object_array_tuples(values) # noqa\n except ImportError:\n pass\n\n def test_object(self):\n\n # GH 7431\n # cannot infer more than this as only a single element\n arr = np.array([None], dtype='O')\n result = lib.infer_dtype(arr)\n assert result == 'mixed'\n\n def test_to_object_array_width(self):\n # see gh-13320\n rows = [[1, 2, 3], [4, 5, 6]]\n\n expected = np.array(rows, dtype=object)\n out = 
lib.to_object_array(rows)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array(rows, dtype=object)\n out = lib.to_object_array(rows, min_width=1)\n tm.assert_numpy_array_equal(out, expected)\n\n expected = np.array([[1, 2, 3, None, None],\n [4, 5, 6, None, None]], dtype=object)\n out = lib.to_object_array(rows, min_width=5)\n tm.assert_numpy_array_equal(out, expected)\n\n def test_is_period(self):\n assert lib.is_period(pd.Period('2011-01', freq='M'))\n assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))\n assert not lib.is_period(pd.Timestamp('2011-01'))\n assert not lib.is_period(1)\n assert not lib.is_period(np.nan)\n\n def test_categorical(self):\n\n # GH 8974\n from pandas import Categorical, Series\n arr = Categorical(list('abc'))\n result = lib.infer_dtype(arr)\n assert result == 'categorical'\n\n result = lib.infer_dtype(Series(arr))\n assert result == 'categorical'\n\n arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)\n result = lib.infer_dtype(arr)\n assert result == 'categorical'\n\n result = lib.infer_dtype(Series(arr))\n assert result == 'categorical'\n\n\nclass TestNumberScalar(object):\n\n def test_is_number(self):\n\n assert is_number(True)\n assert is_number(1)\n assert is_number(1.1)\n assert is_number(1 + 3j)\n assert is_number(np.bool(False))\n assert is_number(np.int64(1))\n assert is_number(np.float64(1.1))\n assert is_number(np.complex128(1 + 3j))\n assert is_number(np.nan)\n\n assert not is_number(None)\n assert not is_number('x')\n assert not is_number(datetime(2011, 1, 1))\n assert not is_number(np.datetime64('2011-01-01'))\n assert not is_number(Timestamp('2011-01-01'))\n assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))\n assert not is_number(timedelta(1000))\n assert not is_number(Timedelta('1 days'))\n\n # questionable\n assert not is_number(np.bool_(False))\n assert is_number(np.timedelta64(1, 'D'))\n\n def test_is_bool(self):\n assert is_bool(True)\n assert is_bool(np.bool(False))\n assert is_bool(np.bool_(False))\n\n assert not is_bool(1)\n assert not is_bool(1.1)\n assert not is_bool(1 + 3j)\n assert not is_bool(np.int64(1))\n assert not is_bool(np.float64(1.1))\n assert not is_bool(np.complex128(1 + 3j))\n assert not is_bool(np.nan)\n assert not is_bool(None)\n assert not is_bool('x')\n assert not is_bool(datetime(2011, 1, 1))\n assert not is_bool(np.datetime64('2011-01-01'))\n assert not is_bool(Timestamp('2011-01-01'))\n assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))\n assert not is_bool(timedelta(1000))\n assert not is_bool(np.timedelta64(1, 'D'))\n assert not is_bool(Timedelta('1 days'))\n\n def test_is_integer(self):\n assert is_integer(1)\n assert is_integer(np.int64(1))\n\n assert not is_integer(True)\n assert not is_integer(1.1)\n assert not is_integer(1 + 3j)\n assert not is_integer(np.bool(False))\n assert not is_integer(np.bool_(False))\n assert not is_integer(np.float64(1.1))\n assert not is_integer(np.complex128(1 + 3j))\n assert not is_integer(np.nan)\n assert not is_integer(None)\n assert not is_integer('x')\n assert not is_integer(datetime(2011, 1, 1))\n assert not is_integer(np.datetime64('2011-01-01'))\n assert not is_integer(Timestamp('2011-01-01'))\n assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))\n assert not is_integer(timedelta(1000))\n assert not is_integer(Timedelta('1 days'))\n\n # questionable\n assert is_integer(np.timedelta64(1, 'D'))\n\n def test_is_float(self):\n assert is_float(1.1)\n assert is_float(np.float64(1.1))\n assert 
is_float(np.nan)\n\n assert not is_float(True)\n assert not is_float(1)\n assert not is_float(1 + 3j)\n assert not is_float(np.bool(False))\n assert not is_float(np.bool_(False))\n assert not is_float(np.int64(1))\n assert not is_float(np.complex128(1 + 3j))\n assert not is_float(None)\n assert not is_float('x')\n assert not is_float(datetime(2011, 1, 1))\n assert not is_float(np.datetime64('2011-01-01'))\n assert not is_float(Timestamp('2011-01-01'))\n assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))\n assert not is_float(timedelta(1000))\n assert not is_float(np.timedelta64(1, 'D'))\n assert not is_float(Timedelta('1 days'))\n\n def test_is_datetime_dtypes(self):\n\n ts = pd.date_range('20130101', periods=3)\n tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')\n\n assert is_datetime64_dtype('datetime64')\n assert is_datetime64_dtype('datetime64[ns]')\n assert is_datetime64_dtype(ts)\n assert not is_datetime64_dtype(tsa)\n\n assert not is_datetime64_ns_dtype('datetime64')\n assert is_datetime64_ns_dtype('datetime64[ns]')\n assert is_datetime64_ns_dtype(ts)\n assert is_datetime64_ns_dtype(tsa)\n\n assert is_datetime64_any_dtype('datetime64')\n assert is_datetime64_any_dtype('datetime64[ns]')\n assert is_datetime64_any_dtype(ts)\n assert is_datetime64_any_dtype(tsa)\n\n assert not is_datetime64tz_dtype('datetime64')\n assert not is_datetime64tz_dtype('datetime64[ns]')\n assert not is_datetime64tz_dtype(ts)\n assert is_datetime64tz_dtype(tsa)\n\n for tz in ['US/Eastern', 'UTC']:\n dtype = 'datetime64[ns, {}]'.format(tz)\n assert not is_datetime64_dtype(dtype)\n assert is_datetime64tz_dtype(dtype)\n assert is_datetime64_ns_dtype(dtype)\n assert is_datetime64_any_dtype(dtype)\n\n def test_is_timedelta(self):\n assert is_timedelta64_dtype('timedelta64')\n assert is_timedelta64_dtype('timedelta64[ns]')\n assert not is_timedelta64_ns_dtype('timedelta64')\n assert is_timedelta64_ns_dtype('timedelta64[ns]')\n\n tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64')\n assert is_timedelta64_dtype(tdi)\n assert is_timedelta64_ns_dtype(tdi)\n assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))\n\n # Conversion to Int64Index:\n assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))\n assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))\n\n\nclass Testisscalar(object):\n\n def test_isscalar_builtin_scalars(self):\n assert is_scalar(None)\n assert is_scalar(True)\n assert is_scalar(False)\n assert is_scalar(0.)\n assert is_scalar(np.nan)\n assert is_scalar('foobar')\n assert is_scalar(b'foobar')\n assert is_scalar(u('efoobar'))\n assert is_scalar(datetime(2014, 1, 1))\n assert is_scalar(date(2014, 1, 1))\n assert is_scalar(time(12, 0))\n assert is_scalar(timedelta(hours=1))\n assert is_scalar(pd.NaT)\n\n def test_isscalar_builtin_nonscalars(self):\n assert not is_scalar({})\n assert not is_scalar([])\n assert not is_scalar([1])\n assert not is_scalar(())\n assert not is_scalar((1, ))\n assert not is_scalar(slice(None))\n assert not is_scalar(Ellipsis)\n\n def test_isscalar_numpy_array_scalars(self):\n assert is_scalar(np.int64(1))\n assert is_scalar(np.float64(1.))\n assert is_scalar(np.int32(1))\n assert is_scalar(np.object_('foobar'))\n assert is_scalar(np.str_('foobar'))\n assert is_scalar(np.unicode_(u('foobar')))\n assert is_scalar(np.bytes_(b'foobar'))\n assert is_scalar(np.datetime64('2014-01-01'))\n assert is_scalar(np.timedelta64(1, 'h'))\n\n def test_isscalar_numpy_zerodim_arrays(self):\n for zerodim in [np.array(1), np.array('foobar'),\n 
np.array(np.datetime64('2014-01-01')),\n                        np.array(np.timedelta64(1, 'h')),\n                        np.array(np.datetime64('NaT'))]:\n            assert not is_scalar(zerodim)\n            assert is_scalar(lib.item_from_zerodim(zerodim))\n\n    def test_isscalar_numpy_arrays(self):\n        assert not is_scalar(np.array([]))\n        assert not is_scalar(np.array([[]]))\n        assert not is_scalar(np.matrix('1; 2'))\n\n    def test_isscalar_pandas_scalars(self):\n        assert is_scalar(Timestamp('2014-01-01'))\n        assert is_scalar(Timedelta(hours=1))\n        assert is_scalar(Period('2014-01-01'))\n\n    def test_isscalar_pandas_containers(self):\n        assert not is_scalar(Series())\n        assert not is_scalar(Series([1]))\n        assert not is_scalar(DataFrame())\n        assert not is_scalar(DataFrame([[1]]))\n        with catch_warnings(record=True):\n            assert not is_scalar(Panel())\n            assert not is_scalar(Panel([[[1]]]))\n        assert not is_scalar(Index([]))\n        assert not is_scalar(Index([1]))\n\n\ndef test_datetimeindex_from_empty_datetime64_array():\n    for unit in ['ms', 'us', 'ns']:\n        idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))\n        assert (len(idx) == 0)\n\n\ndef test_nan_to_nat_conversions():\n\n    df = DataFrame(dict({\n        'A': np.asarray(\n            lrange(10), dtype='float64'),\n        'B': Timestamp('20010101')\n    }))\n    df.iloc[3:6, :] = np.nan\n    result = df.loc[4, 'B'].value\n    assert (result == tslib.iNaT)\n\n    s = df['B'].copy()\n    s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)\n    assert (isnull(s[8]))\n\n    # numpy < 1.7.0 is wrong\n    from distutils.version import LooseVersion\n    if LooseVersion(np.__version__) >= '1.7.0':\n        assert (s[8].value == np.datetime64('NaT').astype(np.int64))\n\n\ndef test_is_scipy_sparse(spmatrix):  # noqa: F811\n    tm._skip_if_no_scipy()\n    assert is_scipy_sparse(spmatrix([[0, 1]]))\n    assert not is_scipy_sparse(np.array([1]))\n\n\ndef test_ensure_int32():\n    values = np.arange(10, dtype=np.int32)\n    result = _ensure_int32(values)\n    assert (result.dtype == np.int32)\n\n    values = np.arange(10, dtype=np.int64)\n    result = _ensure_int32(values)\n    assert (result.dtype == np.int32)\n\n\ndef test_ensure_categorical():\n    values = np.arange(10, dtype=np.int32)\n    result = _ensure_categorical(values)\n    assert (result.dtype == 'category')\n\n    values = Categorical(values)\n    result = _ensure_categorical(values)\n    tm.assert_categorical_equal(result, values)\n",
"# -*- coding: utf-8 -*-\nfrom pandas import compat\n\nimport pytest\n\nfrom distutils.version import LooseVersion\nfrom numpy import nan\nimport numpy as np\n\nfrom pandas import (Series, date_range, NaT)\n\nfrom pandas.compat import product\nfrom pandas.util.testing import assert_series_equal\nimport pandas.util.testing as tm\nfrom pandas.tests.series.common import TestData\n\n\nclass TestSeriesRank(TestData):\n s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])\n\n results = {\n 'average': np.array([1.5, 5.5, 7.0, 3.5, nan,\n 3.5, 1.5, 8.0, nan, 5.5]),\n 'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),\n 'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),\n 'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),\n 'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),\n }\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.ts[::2] = np.nan\n self.ts[:10][::3] = 4.\n\n ranks = self.ts.rank()\n oranks = self.ts.astype('O').rank()\n\n assert_series_equal(ranks, oranks)\n\n mask = np.isnan(self.ts)\n filled = self.ts.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(rankdata(filled), index=filled.index, name='ts')\n exp[mask] = np.nan\n\n tm.assert_series_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = iseries.rank()\n exp = iseries.astype(float).rank()\n assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n rng = date_range('1/1/1990', periods=5)\n iseries = Series(np.arange(5), rng) + 1\n iseries.iloc[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])\n exp = Series([2, 1, 3, 5, 4, 6.0])\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n # GH 5968\n iseries = Series(['3 day', '1 day 10m', '-2 day', NaT],\n dtype='m8[ns]')\n exp = Series([3, 2, 1, np.nan])\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n values = np.array(\n [-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40\n ], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n def test_rank_categorical(self):\n # GH issue #15420 rank incorrectly orders ordered categories\n\n # Test ascending/descending ranking for ordered categoricals\n exp = Series([1., 2., 3., 4., 5., 6.])\n exp_desc = Series([6., 5., 4., 3., 2., 1.])\n ordered = Series(\n ['first', 'second', 'third', 'fourth', 'fifth', 'sixth']\n ).astype(\n 'category',\n categories=['first', 'second', 'third',\n 'fourth', 
'fifth', 'sixth'],\n ordered=True\n )\n assert_series_equal(ordered.rank(), exp)\n assert_series_equal(ordered.rank(ascending=False), exp_desc)\n\n # Unordered categoricals should be ranked as objects\n unordered = Series(\n ['first', 'second', 'third', 'fourth', 'fifth', 'sixth'],\n ).astype(\n 'category',\n categories=['first', 'second', 'third',\n 'fourth', 'fifth', 'sixth'],\n ordered=False\n )\n exp_unordered = Series([2., 4., 6., 3., 1., 5.])\n res = unordered.rank()\n assert_series_equal(res, exp_unordered)\n\n unordered1 = Series(\n [1, 2, 3, 4, 5, 6],\n ).astype(\n 'category',\n categories=[1, 2, 3, 4, 5, 6],\n ordered=False\n )\n exp_unordered1 = Series([1., 2., 3., 4., 5., 6.])\n res1 = unordered1.rank()\n assert_series_equal(res1, exp_unordered1)\n\n # Test na_option for rank data\n na_ser = Series(\n ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', np.NaN]\n ).astype(\n 'category',\n categories=[\n 'first', 'second', 'third', 'fourth',\n 'fifth', 'sixth', 'seventh'\n ],\n ordered=True\n )\n\n exp_top = Series([2., 3., 4., 5., 6., 7., 1.])\n exp_bot = Series([1., 2., 3., 4., 5., 6., 7.])\n exp_keep = Series([1., 2., 3., 4., 5., 6., np.NaN])\n\n assert_series_equal(na_ser.rank(na_option='top'), exp_top)\n assert_series_equal(na_ser.rank(na_option='bottom'), exp_bot)\n assert_series_equal(na_ser.rank(na_option='keep'), exp_keep)\n\n # Test na_option for rank data with ascending False\n exp_top = Series([7., 6., 5., 4., 3., 2., 1.])\n exp_bot = Series([6., 5., 4., 3., 2., 1., 7.])\n exp_keep = Series([6., 5., 4., 3., 2., 1., np.NaN])\n\n assert_series_equal(\n na_ser.rank(na_option='top', ascending=False),\n exp_top\n )\n assert_series_equal(\n na_ser.rank(na_option='bottom', ascending=False),\n exp_bot\n )\n assert_series_equal(\n na_ser.rank(na_option='keep', ascending=False),\n exp_keep\n )\n\n # Test with pct=True\n na_ser = Series(\n ['first', 'second', 'third', 'fourth', np.NaN],\n ).astype(\n 'category',\n categories=['first', 'second', 'third', 'fourth'],\n ordered=True\n )\n exp_top = Series([0.4, 0.6, 0.8, 1., 0.2])\n exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.])\n exp_keep = Series([0.25, 0.5, 0.75, 1., np.NaN])\n\n assert_series_equal(na_ser.rank(na_option='top', pct=True), exp_top)\n assert_series_equal(na_ser.rank(na_option='bottom', pct=True), exp_bot)\n assert_series_equal(na_ser.rank(na_option='keep', pct=True), exp_keep)\n\n def test_rank_signature(self):\n s = Series([0, 1])\n s.rank(method='average')\n pytest.raises(ValueError, s.rank, 'average')\n\n def test_rank_inf(self):\n pytest.skip('DataFrame.rank does not currently rank '\n 'np.inf and -np.inf properly')\n\n values = np.array(\n [-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,\n 2, 40, np.inf], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n def test_rank_tie_methods(self):\n s = self.s\n\n def _check(s, expected, method='average'):\n result = s.rank(method=method)\n tm.assert_series_equal(result, Series(expected))\n\n dtypes = [None, object]\n disabled = set([(object, 'first')])\n results = self.results\n\n for method, dtype in product(results, dtypes):\n if (dtype, method) in disabled:\n continue\n series = s if dtype is None else s.astype(dtype)\n _check(series, results[method], method=method)\n\n def test_rank_methods_series(self):\n tm.skip_if_no_package('scipy', min_version='0.13',\n 
app='scipy.stats.rankdata')\n import scipy\n from scipy.stats import rankdata\n\n xs = np.random.randn(9)\n xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates\n np.random.shuffle(xs)\n\n index = [chr(ord('a') + i) for i in range(len(xs))]\n\n for vals in [xs, xs + 1e6, xs * 1e-6]:\n ts = Series(vals, index=index)\n\n for m in ['average', 'min', 'max', 'first', 'dense']:\n result = ts.rank(method=m)\n sprank = rankdata(vals, m if m != 'first' else 'ordinal')\n expected = Series(sprank, index=index)\n\n if LooseVersion(scipy.__version__) >= '0.17.0':\n expected = expected.astype('float64')\n tm.assert_series_equal(result, expected)\n\n def test_rank_dense_method(self):\n dtypes = ['O', 'f8', 'i8']\n in_out = [([1], [1]),\n ([2], [1]),\n ([0], [1]),\n ([2, 2], [1, 1]),\n ([1, 2, 3], [1, 2, 3]),\n ([4, 2, 1], [3, 2, 1],),\n ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),\n ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5])]\n\n for ser, exp in in_out:\n for dtype in dtypes:\n s = Series(ser).astype(dtype)\n result = s.rank(method='dense')\n expected = Series(exp).astype(result.dtype)\n assert_series_equal(result, expected)\n\n def test_rank_descending(self):\n dtypes = ['O', 'f8', 'i8']\n\n for dtype, method in product(dtypes, self.results):\n if 'i' in dtype:\n s = self.s.dropna()\n else:\n s = self.s.astype(dtype)\n\n res = s.rank(ascending=False)\n expected = (s.max() - s).rank()\n assert_series_equal(res, expected)\n\n if method == 'first' and dtype == 'O':\n continue\n\n expected = (s.max() - s).rank(method=method)\n res2 = s.rank(method=method, ascending=False)\n assert_series_equal(res2, expected)\n\n def test_rank_int(self):\n s = self.s.dropna().astype('i8')\n\n for method, res in compat.iteritems(self.results):\n result = s.rank(method=method)\n expected = Series(res).dropna()\n expected.index = result.index\n assert_series_equal(result, expected)\n\n def test_rank_object_bug(self):\n # GH 13445\n\n # smoke tests\n Series([np.nan] * 32).astype(object).rank(ascending=True)\n Series([np.nan] * 32).astype(object).rank(ascending=False)\n",
"\"\"\"Test functions for fftpack.helper module\n\nCopied from fftpack.helper by Pearu Peterson, October 2005\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom numpy.testing import TestCase, run_module_suite, assert_array_almost_equal\nfrom numpy import fft\nfrom numpy import pi\nfrom numpy.fft.helper import _FFTCache\n\n\nclass TestFFTShift(TestCase):\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\n assert_array_almost_equal(fft.fftshift(x), y)\n assert_array_almost_equal(fft.ifftshift(y), x)\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\n assert_array_almost_equal(fft.fftshift(x), y)\n assert_array_almost_equal(fft.ifftshift(y), x)\n\n def test_inverse(self):\n for n in [1, 4, 9, 100, 211]:\n x = np.random.random((n,))\n assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)\n\n def test_axes_keyword(self):\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\n assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)\n assert_array_almost_equal(fft.fftshift(freqs, axes=0),\n fft.fftshift(freqs, axes=(0,)))\n assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)\n assert_array_almost_equal(fft.ifftshift(shifted, axes=0),\n fft.ifftshift(shifted, axes=(0,)))\n\n\nclass TestFFTFreq(TestCase):\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\n assert_array_almost_equal(9*fft.fftfreq(9), x)\n assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\n assert_array_almost_equal(10*fft.fftfreq(10), x)\n assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)\n\n\nclass TestRFFTFreq(TestCase):\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4]\n assert_array_almost_equal(9*fft.rfftfreq(9), x)\n assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)\n x = [0, 1, 2, 3, 4, 5]\n assert_array_almost_equal(10*fft.rfftfreq(10), x)\n assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)\n\n\nclass TestIRFFTN(TestCase):\n\n def test_not_last_axis_success(self):\n ar, ai = np.random.random((2, 16, 8, 32))\n a = ar + 1j*ai\n\n axes = (-2,)\n\n # Should not raise error\n fft.irfftn(a, axes=axes)\n\n\nclass TestFFTCache(TestCase):\n\n def test_basic_behaviour(self):\n c = _FFTCache(max_size_in_mb=1, max_item_count=4)\n\n # Put\n c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))\n c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))\n\n # Get\n assert_array_almost_equal(c.pop_twiddle_factors(1),\n np.ones(2, dtype=np.float32))\n assert_array_almost_equal(c.pop_twiddle_factors(2),\n np.zeros(2, dtype=np.float32))\n\n # Nothing should be left.\n self.assertEqual(len(c._dict), 0)\n\n # Now put everything in twice so it can be retrieved once and each will\n # still have one item left.\n for _ in range(2):\n c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))\n c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))\n assert_array_almost_equal(c.pop_twiddle_factors(1),\n np.ones(2, dtype=np.float32))\n assert_array_almost_equal(c.pop_twiddle_factors(2),\n np.zeros(2, dtype=np.float32))\n self.assertEqual(len(c._dict), 2)\n\n def test_automatic_pruning(self):\n # That's around 2600 single precision samples.\n c = _FFTCache(max_size_in_mb=0.01, max_item_count=4)\n\n c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))\n c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))\n 
self.assertEqual(list(c._dict.keys()), [1, 2])\n\n        # This is larger than the limit but should still be kept.\n        c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))\n        self.assertEqual(list(c._dict.keys()), [1, 2, 3])\n        # Add one more.\n        c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))\n        # The other three should no longer exist.\n        self.assertEqual(list(c._dict.keys()), [4])\n\n        # Now test the max item count pruning.\n        c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)\n        c.put_twiddle_factors(2, np.empty(2))\n        c.put_twiddle_factors(1, np.empty(2))\n        # Can still be accessed.\n        self.assertEqual(list(c._dict.keys()), [2, 1])\n\n        c.put_twiddle_factors(3, np.empty(2))\n        # 1 and 3 can still be accessed - c[2] has been touched least recently\n        # and is thus evicted.\n        self.assertEqual(list(c._dict.keys()), [1, 3])\n\n        # One last test. We will add a single large item that is slightly\n        # bigger than the cache size. Some small items can still be added.\n        c = _FFTCache(max_size_in_mb=0.01, max_item_count=5)\n        c.put_twiddle_factors(1, np.ones(3000, dtype=np.float32))\n        c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))\n        c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))\n        c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))\n        self.assertEqual(list(c._dict.keys()), [1, 2, 3, 4])\n\n        # One more big item. This time it is 6 smaller ones but they are\n        # counted as one big item.\n        for _ in range(6):\n            c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))\n        # '1' no longer in the cache. Rest still in the cache.\n        self.assertEqual(list(c._dict.keys()), [2, 3, 4, 5])\n\n        # Another big item - should now be the only item in the cache.\n        c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))\n        self.assertEqual(list(c._dict.keys()), [6])\n\n\nif __name__ == \"__main__\":\n    run_module_suite()\n",
"#\n# Created by: Pearu Peterson, March 2002\n#\n\"\"\" Test functions for linalg.basic module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\nimport itertools\nimport numpy as np\nfrom numpy import (arange, array, dot, zeros, identity, conjugate, transpose,\n float32)\nimport numpy.linalg as linalg\nfrom numpy.random import random\n\nfrom numpy.testing import (assert_equal, assert_almost_equal, assert_,\n assert_array_almost_equal, assert_allclose,\n assert_array_equal)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nfrom scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,\n solve_banded, solveh_banded, solve_triangular,\n solve_circulant, circulant, LinAlgError, block_diag,\n matrix_balance)\n\nfrom scipy.linalg.basic import LstsqLapackError\nfrom scipy.linalg._testutils import assert_no_overwrite\n\nfrom scipy._lib._version import NumpyVersion\n\n\n\"\"\"\nBugs:\n1) solve.check_random_sym_complex fails if a is complex\n and transpose(a) = conjugate(a) (a is Hermitian).\n\"\"\"\n__usage__ = \"\"\"\nBuild linalg:\n python setup_linalg.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.linalg.test()'\nRun tests if linalg is not installed:\n python tests/test_basic.py\n\"\"\"\n\nREAL_DTYPES = [np.float32, np.float64, np.longdouble]\nCOMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble]\nDTYPES = REAL_DTYPES + COMPLEX_DTYPES\n\n\ndef _eps_cast(dtyp):\n \"\"\"Get the epsilon for dtype, possibly downcast to BLAS types.\"\"\"\n dt = dtyp\n if dt == np.longdouble:\n dt = np.float64\n elif dt == np.clongdouble:\n dt = np.complex128\n return np.finfo(dt).eps\n\n\nclass TestSolveBanded(object):\n\n def test_real(self):\n a = array([[1.0, 20, 0, 0],\n [-30, 4, 6, 0],\n [2, 1, 20, 2],\n [0, -1, 7, 14]])\n ab = array([[0.0, 20, 6, 2],\n [1, 4, 20, 14],\n [-30, 1, 7, 0],\n [2, -1, 0, 0]])\n l, u = 2, 1\n b4 = array([10.0, 0.0, 2.0, 14.0])\n b4by1 = b4.reshape(-1, 1)\n b4by2 = array([[2, 1],\n [-30, 4],\n [2, 3],\n [1, 3]])\n b4by4 = array([[1, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 1, 0, 0]])\n for b in [b4, b4by1, b4by2, b4by4]:\n x = solve_banded((l, u), ab, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_complex(self):\n a = array([[1.0, 20, 0, 0],\n [-30, 4, 6, 0],\n [2j, 1, 20, 2j],\n [0, -1, 7, 14]])\n ab = array([[0.0, 20, 6, 2j],\n [1, 4, 20, 14],\n [-30, 1, 7, 0],\n [2j, -1, 0, 0]])\n l, u = 2, 1\n b4 = array([10.0, 0.0, 2.0, 14.0j])\n b4by1 = b4.reshape(-1, 1)\n b4by2 = array([[2, 1],\n [-30, 4],\n [2, 3],\n [1, 3]])\n b4by4 = array([[1, 0, 0, 0],\n [0, 0, 0, 1j],\n [0, 1, 0, 0],\n [0, 1, 0, 0]])\n for b in [b4, b4by1, b4by2, b4by4]:\n x = solve_banded((l, u), ab, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_tridiag_real(self):\n ab = array([[0.0, 20, 6, 2],\n [1, 4, 20, 14],\n [-30, 1, 7, 0]])\n a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(\n ab[2, :-1], -1)\n b4 = array([10.0, 0.0, 2.0, 14.0])\n b4by1 = b4.reshape(-1, 1)\n b4by2 = array([[2, 1],\n [-30, 4],\n [2, 3],\n [1, 3]])\n b4by4 = array([[1, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 1, 0, 0]])\n for b in [b4, b4by1, b4by2, b4by4]:\n x = solve_banded((1, 1), ab, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_tridiag_complex(self):\n ab = array([[0.0, 20, 6, 2j],\n [1, 4, 20, 14],\n [-30, 1, 7, 0]])\n a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(\n ab[2, :-1], -1)\n b4 = array([10.0, 0.0, 2.0, 
14.0j])\n b4by1 = b4.reshape(-1, 1)\n b4by2 = array([[2, 1],\n [-30, 4],\n [2, 3],\n [1, 3]])\n b4by4 = array([[1, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 1, 0, 0]])\n for b in [b4, b4by1, b4by2, b4by4]:\n x = solve_banded((1, 1), ab, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_check_finite(self):\n a = array([[1.0, 20, 0, 0],\n [-30, 4, 6, 0],\n [2, 1, 20, 2],\n [0, -1, 7, 14]])\n ab = array([[0.0, 20, 6, 2],\n [1, 4, 20, 14],\n [-30, 1, 7, 0],\n [2, -1, 0, 0]])\n l, u = 2, 1\n b4 = array([10.0, 0.0, 2.0, 14.0])\n x = solve_banded((l, u), ab, b4, check_finite=False)\n assert_array_almost_equal(dot(a, x), b4)\n\n def test_bad_shape(self):\n ab = array([[0.0, 20, 6, 2],\n [1, 4, 20, 14],\n [-30, 1, 7, 0],\n [2, -1, 0, 0]])\n l, u = 2, 1\n bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)\n assert_raises(ValueError, solve_banded, (l, u), ab, bad)\n assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])\n\n # Values of (l,u) are not compatible with ab.\n assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])\n\n def test_1x1(self):\n b = array([[1., 2., 3.]])\n x = solve_banded((1, 1), [[0], [2], [0]], b)\n assert_array_equal(x, [[0.5, 1.0, 1.5]])\n assert_equal(x.dtype, np.dtype('f8'))\n assert_array_equal(b, [[1.0, 2.0, 3.0]])\n\n def test_native_list_arguments(self):\n a = [[1.0, 20, 0, 0],\n [-30, 4, 6, 0],\n [2, 1, 20, 2],\n [0, -1, 7, 14]]\n ab = [[0.0, 20, 6, 2],\n [1, 4, 20, 14],\n [-30, 1, 7, 0],\n [2, -1, 0, 0]]\n l, u = 2, 1\n b = [10.0, 0.0, 2.0, 14.0]\n x = solve_banded((l, u), ab, b)\n assert_array_almost_equal(dot(a, x), b)\n\n\nclass TestSolveHBanded(object):\n\n def test_01_upper(self):\n # Solve\n # [ 4 1 2 0] [1]\n # [ 1 4 1 2] X = [4]\n # [ 2 1 4 1] [1]\n # [ 0 2 1 4] [2]\n # with the RHS as a 1D array.\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]])\n b = array([1.0, 4.0, 1.0, 2.0])\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])\n\n def test_02_upper(self):\n # Solve\n # [ 4 1 2 0] [1 6]\n # [ 1 4 1 2] X = [4 2]\n # [ 2 1 4 1] [1 6]\n # [ 0 2 1 4] [2 1]\n #\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]])\n b = array([[1.0, 6.0],\n [4.0, 2.0],\n [1.0, 6.0],\n [2.0, 1.0]])\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]])\n assert_array_almost_equal(x, expected)\n\n def test_03_upper(self):\n # Solve\n # [ 4 1 2 0] [1]\n # [ 1 4 1 2] X = [4]\n # [ 2 1 4 1] [1]\n # [ 0 2 1 4] [2]\n # with the RHS as a 2D array with shape (3,1).\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]])\n b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))\n\n def test_01_lower(self):\n # Solve\n # [ 4 1 2 0] [1]\n # [ 1 4 1 2] X = [4]\n # [ 2 1 4 1] [1]\n # [ 0 2 1 4] [2]\n #\n ab = array([[4.0, 4.0, 4.0, 4.0],\n [1.0, 1.0, 1.0, -99],\n [2.0, 2.0, 0.0, 0.0]])\n b = array([1.0, 4.0, 1.0, 2.0])\n x = solveh_banded(ab, b, lower=True)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])\n\n def test_02_lower(self):\n # Solve\n # [ 4 1 2 0] [1 6]\n # [ 1 4 1 2] X = [4 2]\n # [ 2 1 4 1] [1 6]\n # [ 0 2 1 4] [2 1]\n #\n ab = array([[4.0, 4.0, 4.0, 4.0],\n [1.0, 1.0, 1.0, -99],\n [2.0, 2.0, 0.0, 0.0]])\n b = array([[1.0, 6.0],\n [4.0, 2.0],\n [1.0, 6.0],\n [2.0, 1.0]])\n x = solveh_banded(ab, b, lower=True)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]])\n 
assert_array_almost_equal(x, expected)\n\n def test_01_float32(self):\n # Solve\n # [ 4 1 2 0] [1]\n # [ 1 4 1 2] X = [4]\n # [ 2 1 4 1] [1]\n # [ 0 2 1 4] [2]\n #\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]], dtype=float32)\n b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])\n\n def test_02_float32(self):\n # Solve\n # [ 4 1 2 0] [1 6]\n # [ 1 4 1 2] X = [4 2]\n # [ 2 1 4 1] [1 6]\n # [ 0 2 1 4] [2 1]\n #\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]], dtype=float32)\n b = array([[1.0, 6.0],\n [4.0, 2.0],\n [1.0, 6.0],\n [2.0, 1.0]], dtype=float32)\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0],\n [0.0, 0.0]])\n assert_array_almost_equal(x, expected)\n\n def test_01_complex(self):\n # Solve\n # [ 4 -j 2 0] [2-j]\n # [ j 4 -j 2] X = [4-j]\n # [ 2 j 4 -j] [4+j]\n # [ 0 2 j 4] [2+j]\n #\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, -1.0j, -1.0j, -1.0j],\n [4.0, 4.0, 4.0, 4.0]])\n b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])\n\n def test_02_complex(self):\n # Solve\n # [ 4 -j 2 0] [2-j 2+4j]\n # [ j 4 -j 2] X = [4-j -1-j]\n # [ 2 j 4 -j] [4+j 4+2j]\n # [ 0 2 j 4] [2+j j]\n #\n ab = array([[0.0, 0.0, 2.0, 2.0],\n [-99, -1.0j, -1.0j, -1.0j],\n [4.0, 4.0, 4.0, 4.0]])\n b = array([[2-1j, 2+4j],\n [4.0-1j, -1-1j],\n [4.0+1j, 4+2j],\n [2+1j, 1j]])\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0j],\n [1.0, 0.0],\n [1.0, 1.0],\n [0.0, 0.0]])\n assert_array_almost_equal(x, expected)\n\n def test_tridiag_01_upper(self):\n # Solve\n # [ 4 1 0] [1]\n # [ 1 4 1] X = [4]\n # [ 0 1 4] [1]\n # with the RHS as a 1D array.\n ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])\n b = array([1.0, 4.0, 1.0])\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0])\n\n def test_tridiag_02_upper(self):\n # Solve\n # [ 4 1 0] [1 4]\n # [ 1 4 1] X = [4 2]\n # [ 0 1 4] [1 4]\n #\n ab = array([[-99, 1.0, 1.0],\n [4.0, 4.0, 4.0]])\n b = array([[1.0, 4.0],\n [4.0, 2.0],\n [1.0, 4.0]])\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0]])\n assert_array_almost_equal(x, expected)\n\n def test_tridiag_03_upper(self):\n # Solve\n # [ 4 1 0] [1]\n # [ 1 4 1] X = [4]\n # [ 0 1 4] [1]\n # with the RHS as a 2D array with shape (3,1).\n ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])\n b = array([1.0, 4.0, 1.0]).reshape(-1, 1)\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))\n\n def test_tridiag_01_lower(self):\n # Solve\n # [ 4 1 0] [1]\n # [ 1 4 1] X = [4]\n # [ 0 1 4] [1]\n #\n ab = array([[4.0, 4.0, 4.0],\n [1.0, 1.0, -99]])\n b = array([1.0, 4.0, 1.0])\n x = solveh_banded(ab, b, lower=True)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0])\n\n def test_tridiag_02_lower(self):\n # Solve\n # [ 4 1 0] [1 4]\n # [ 1 4 1] X = [4 2]\n # [ 0 1 4] [1 4]\n #\n ab = array([[4.0, 4.0, 4.0],\n [1.0, 1.0, -99]])\n b = array([[1.0, 4.0],\n [4.0, 2.0],\n [1.0, 4.0]])\n x = solveh_banded(ab, b, lower=True)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0]])\n assert_array_almost_equal(x, expected)\n\n def test_tridiag_01_float32(self):\n # Solve\n # [ 4 1 0] [1]\n # [ 1 4 1] X = [4]\n # [ 0 1 4] [1]\n #\n ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)\n b = array([1.0, 4.0, 1.0], dtype=float32)\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 
1.0, 0.0])\n\n def test_tridiag_02_float32(self):\n # Solve\n # [ 4 1 0] [1 4]\n # [ 1 4 1] X = [4 2]\n # [ 0 1 4] [1 4]\n #\n ab = array([[-99, 1.0, 1.0],\n [4.0, 4.0, 4.0]], dtype=float32)\n b = array([[1.0, 4.0],\n [4.0, 2.0],\n [1.0, 4.0]], dtype=float32)\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0]])\n assert_array_almost_equal(x, expected)\n\n def test_tridiag_01_complex(self):\n # Solve\n # [ 4 -j 0] [ -j]\n # [ j 4 -j] X = [4-j]\n # [ 0 j 4] [4+j]\n #\n ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])\n b = array([-1.0j, 4.0-1j, 4+1j])\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 1.0])\n\n def test_tridiag_02_complex(self):\n # Solve\n # [ 4 -j 0] [ -j 4j]\n # [ j 4 -j] X = [4-j -1-j]\n # [ 0 j 4] [4+j 4 ]\n #\n ab = array([[-99, -1.0j, -1.0j],\n [4.0, 4.0, 4.0]])\n b = array([[-1j, 4.0j],\n [4.0-1j, -1.0-1j],\n [4.0+1j, 4.0]])\n x = solveh_banded(ab, b)\n expected = array([[0.0, 1.0j],\n [1.0, 0.0],\n [1.0, 1.0]])\n assert_array_almost_equal(x, expected)\n\n def test_check_finite(self):\n # Solve\n # [ 4 1 0] [1]\n # [ 1 4 1] X = [4]\n # [ 0 1 4] [1]\n # with the RHS as a 1D array.\n ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])\n b = array([1.0, 4.0, 1.0])\n x = solveh_banded(ab, b, check_finite=False)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0])\n\n def test_bad_shapes(self):\n ab = array([[-99, 1.0, 1.0],\n [4.0, 4.0, 4.0]])\n b = array([[1.0, 4.0],\n [4.0, 2.0]])\n assert_raises(ValueError, solveh_banded, ab, b)\n assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])\n assert_raises(ValueError, solveh_banded, ab, [1.0])\n\n def test_1x1(self):\n x = solveh_banded([[1]], [[1, 2, 3]])\n assert_array_equal(x, [[1.0, 2.0, 3.0]])\n assert_equal(x.dtype, np.dtype('f8'))\n\n def test_native_list_arguments(self):\n # Same as test_01_upper, using python's native list.\n ab = [[0.0, 0.0, 2.0, 2.0],\n [-99, 1.0, 1.0, 1.0],\n [4.0, 4.0, 4.0, 4.0]]\n b = [1.0, 4.0, 1.0, 2.0]\n x = solveh_banded(ab, b)\n assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])\n\n\nclass TestSolve(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_20Feb04_bug(self):\n a = [[1, 1], [1.0, 0]] # ok\n x0 = solve(a, [1, 0j])\n assert_array_almost_equal(dot(a, x0), [1, 0])\n\n # gives failure with clapack.zgesv(..,rowmajor=0)\n a = [[1, 1], [1.2, 0]]\n b = [1, 0j]\n x0 = solve(a, b)\n assert_array_almost_equal(dot(a, x0), [1, 0])\n\n def test_simple(self):\n a = [[1, 20], [-30, 4]]\n for b in ([[1, 0], [0, 1]], [1, 0],\n [[2, 1], [-30, 4]]):\n x = solve(a, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_simple_sym(self):\n a = [[2, 3], [3, 5]]\n for lower in [0, 1]:\n for b in ([[1, 0], [0, 1]], [1, 0]):\n x = solve(a, b, sym_pos=1, lower=lower)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_simple_sym_complex(self):\n a = [[5, 2], [2, 4]]\n for b in [[1j, 0],\n [[1j, 1j],\n [0, 2]],\n ]:\n x = solve(a, b, sym_pos=1)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_simple_complex(self):\n a = array([[5, 2], [2j, 4]], 'D')\n for b in [[1j, 0],\n [[1j, 1j],\n [0, 2]],\n [1, 0j],\n array([1, 0], 'D'),\n ]:\n x = solve(a, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_nils_20Feb04(self):\n n = 2\n A = random([n, n])+random([n, n])*1j\n X = zeros((n, n), 'D')\n Ainv = inv(A)\n R = identity(n)+identity(n)*0j\n for i in arange(0, n):\n r = R[:, i]\n X[:, i] = solve(A, r)\n assert_array_almost_equal(X, Ainv)\n\n def test_random(self):\n\n n = 20\n a = random([n, n])\n for i in range(n):\n a[i, i] = 
20*(.1+a[i, i])\n for i in range(4):\n b = random([n, 3])\n x = solve(a, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_random_complex(self):\n n = 20\n a = random([n, n]) + 1j * random([n, n])\n for i in range(n):\n a[i, i] = 20*(.1+a[i, i])\n for i in range(2):\n b = random([n, 3])\n x = solve(a, b)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_random_sym(self):\n n = 20\n a = random([n, n])\n for i in range(n):\n a[i, i] = abs(20*(.1+a[i, i]))\n for j in range(i):\n a[i, j] = a[j, i]\n for i in range(4):\n b = random([n])\n x = solve(a, b, sym_pos=1)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_random_sym_complex(self):\n n = 20\n a = random([n, n])\n # XXX: with the following addition the accuracy will be very low\n a = a + 1j*random([n, n])\n for i in range(n):\n a[i, i] = abs(20*(.1+a[i, i]))\n for j in range(i):\n a[i, j] = conjugate(a[j, i])\n b = random([n])+2j*random([n])\n for i in range(2):\n x = solve(a, b, sym_pos=1)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_check_finite(self):\n a = [[1, 20], [-30, 4]]\n for b in ([[1, 0], [0, 1]], [1, 0],\n [[2, 1], [-30, 4]]):\n x = solve(a, b, check_finite=False)\n assert_array_almost_equal(dot(a, x), b)\n\n def test_scalar_a_and_1D_b(self):\n a = 1\n b = [1, 2, 3]\n x = solve(a, b)\n assert_array_almost_equal(x.ravel(), b)\n assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape')\n\n def test_simple2(self):\n a = np.array([[1.80, 2.88, 2.05, -0.89],\n [525.00, -295.00, -95.00, -380.00],\n [1.58, -2.69, -2.90, -1.04],\n [-1.11, -0.66, -0.59, 0.80]])\n\n b = np.array([[9.52, 18.47],\n [2435.00, 225.00],\n [0.77, -13.28],\n [-6.22, -6.21]])\n\n x = solve(a, b)\n assert_array_almost_equal(x, np.array([[1., -1, 3, -5],\n [3, 2, 4, 1]]).T)\n\n def test_simple_complex2(self):\n a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j],\n [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j],\n [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j],\n [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]])\n\n b = np.array([[26.26+51.78j, 31.32-6.70j],\n [64.30-86.80j, 158.60-14.20j],\n [-5.75+25.31j, -2.15+30.19j],\n [1.16+2.57j, -2.56+7.55j]])\n\n x = solve(a, b)\n assert_array_almost_equal(x, np. 
array([[1+1.j, -1-2.j],\n [2-3.j, 5+1.j],\n [-4-5.j, -3+4.j],\n [6.j, 2-3.j]]))\n\n def test_hermitian(self):\n # An upper triangular matrix will be used for hermitian matrix a\n a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j],\n [0, -4.63, -1.84+0.03j, 2.21+0.21j],\n [0, 0, -8.87, 1.58-0.90j],\n [0, 0, 0, -1.36]])\n b = np.array([[2.98-10.18j, 28.68-39.89j],\n [-9.58+3.88j, -24.79-8.40j],\n [-0.77-16.05j, 4.23-70.02j],\n [7.79+5.48j, -35.39+18.01j]])\n res = np.array([[2.+1j, -8+6j],\n [3.-2j, 7-2j],\n [-1+2j, -1+5j],\n [1.-1j, 3-4j]])\n x = solve(a, b, assume_a='her')\n assert_array_almost_equal(x, res)\n # Also conjugate a and test for lower triangular data\n x = solve(a.conj().T, b, assume_a='her', lower=True)\n assert_array_almost_equal(x, res)\n\n def test_pos_and_sym(self):\n A = np.arange(1, 10).reshape(3, 3)\n x = solve(np.tril(A)/9, np.ones(3), assume_a='pos')\n assert_array_almost_equal(x, [9., 1.8, 1.])\n x = solve(np.tril(A)/9, np.ones(3), assume_a='sym')\n assert_array_almost_equal(x, [9., 1.8, 1.])\n\n def test_singularity(self):\n a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1],\n [1, 1, 1, 0, 0, 0, 1, 0, 1],\n [0, 1, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 1, 1, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n b = np.arange(9)[:, None]\n assert_raises(LinAlgError, solve, a, b)\n\n def test_ill_condition_warning(self):\n a = np.array([[1, 1], [1+1e-16, 1-1e-16]])\n b = np.ones(2)\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n assert_raises(RuntimeWarning, solve, a, b)\n\n def test_empty_rhs(self):\n a = np.eye(2)\n b = [[], []]\n x = solve(a, b)\n assert_(x.size == 0, 'Returned array is not empty')\n assert_(x.shape == (2, 0), 'Returned empty array shape is wrong')\n\n def test_multiple_rhs(self):\n a = np.eye(2)\n b = np.random.rand(2, 3, 4)\n x = solve(a, b)\n assert_array_almost_equal(x, b)\n\n def test_transposed_keyword(self):\n A = np.arange(9).reshape(3, 3) + 1\n x = solve(np.tril(A)/9, np.ones(3), transposed=True)\n assert_array_almost_equal(x, [1.2, 0.2, 1])\n x = solve(np.tril(A)/9, np.ones(3), transposed=False)\n assert_array_almost_equal(x, [9, -5.4, -1.2])\n\n def test_transposed_notimplemented(self):\n a = np.eye(3).astype(complex)\n with assert_raises(NotImplementedError):\n solve(a, a, transposed=True)\n\n def test_nonsquare_a(self):\n assert_raises(ValueError, solve, [1, 2], 1)\n\n def test_size_mismatch_with_1D_b(self):\n assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3))\n assert_raises(ValueError, solve, np.eye(3), np.ones(4))\n\n def test_assume_a_keyword(self):\n assert_raises(ValueError, solve, 1, 1, assume_a='zxcv')\n\n @pytest.mark.skip(reason=\"Failure on OS X (gh-7500), \"\n \"crash on Windows (gh-8064)\")\n def test_all_type_size_routine_combinations(self):\n sizes = [10, 100, 1000]\n assume_as = ['gen', 'sym', 'pos', 'her']\n dtypes = [np.float32, np.float64, np.complex64, np.complex128]\n for size, assume_a, dtype in itertools.product(sizes,\n assume_as,\n dtypes):\n is_complex = dtype in (np.complex64, np.complex128)\n if assume_a == 'her' and not is_complex:\n continue\n\n err_msg = (\"Failed for size: {}, assume_a: {},\"\n \"dtype: {}\".format(size, assume_a, dtype))\n\n a = np.random.randn(size, size).astype(dtype)\n b = np.random.randn(size).astype(dtype)\n if is_complex:\n a = a + (1j*np.random.randn(size, size)).astype(dtype)\n\n if assume_a == 'sym': # Can still be complex but 
only symmetric\n a = a + a.T\n elif assume_a == 'her': # Handle hermitian matrices here instead\n a = a + a.T.conj()\n elif assume_a == 'pos':\n a = a.conj().T.dot(a) + 0.1*np.eye(size)\n\n tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6\n\n if assume_a in ['gen', 'sym', 'her']:\n # We revert the tolerance from before\n # 4b4a6e7c34fa4060533db38f9a819b98fa81476c\n if dtype in (np.float32, np.complex64):\n tol *= 10\n\n x = solve(a, b, assume_a=assume_a)\n assert_allclose(a.dot(x), b,\n atol=tol * size,\n rtol=tol * size,\n err_msg=err_msg)\n\n if assume_a == 'sym' and dtype not in (np.complex64, np.complex128):\n x = solve(a, b, assume_a=assume_a, transposed=True)\n assert_allclose(a.dot(x), b,\n atol=tol * size,\n rtol=tol * size,\n err_msg=err_msg)\n\n\nclass TestSolveTriangular(object):\n\n def test_simple(self):\n \"\"\"\n solve_triangular on a simple 2x2 matrix.\n \"\"\"\n A = array([[1, 0], [1, 2]])\n b = [1, 1]\n sol = solve_triangular(A, b, lower=True)\n assert_array_almost_equal(sol, [1, 0])\n\n # check that it works also for non-contiguous matrices\n sol = solve_triangular(A.T, b, lower=False)\n assert_array_almost_equal(sol, [.5, .5])\n\n # and that it gives the same result as trans=1\n sol = solve_triangular(A, b, lower=True, trans=1)\n assert_array_almost_equal(sol, [.5, .5])\n\n b = identity(2)\n sol = solve_triangular(A, b, lower=True, trans=1)\n assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])\n\n def test_simple_complex(self):\n \"\"\"\n solve_triangular on a simple 2x2 complex matrix\n \"\"\"\n A = array([[1+1j, 0], [1j, 2]])\n b = identity(2)\n sol = solve_triangular(A, b, lower=True, trans=1)\n assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])\n\n def test_check_finite(self):\n \"\"\"\n solve_triangular on a simple 2x2 matrix.\n \"\"\"\n A = array([[1, 0], [1, 2]])\n b = [1, 1]\n sol = solve_triangular(A, b, lower=True, check_finite=False)\n assert_array_almost_equal(sol, [1, 0])\n\n\nclass TestInv(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_simple(self):\n a = [[1, 2], [3, 4]]\n a_inv = inv(a)\n assert_array_almost_equal(dot(a, a_inv), np.eye(2))\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]\n a_inv = inv(a)\n assert_array_almost_equal(dot(a, a_inv), np.eye(3))\n\n def test_random(self):\n n = 20\n for i in range(4):\n a = random([n, n])\n for i in range(n):\n a[i, i] = 20*(.1+a[i, i])\n a_inv = inv(a)\n assert_array_almost_equal(dot(a, a_inv),\n identity(n))\n\n def test_simple_complex(self):\n a = [[1, 2], [3, 4j]]\n a_inv = inv(a)\n assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])\n\n def test_random_complex(self):\n n = 20\n for i in range(4):\n a = random([n, n])+2j*random([n, n])\n for i in range(n):\n a[i, i] = 20*(.1+a[i, i])\n a_inv = inv(a)\n assert_array_almost_equal(dot(a, a_inv),\n identity(n))\n\n def test_check_finite(self):\n a = [[1, 2], [3, 4]]\n a_inv = inv(a, check_finite=False)\n assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])\n\n\nclass TestDet(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_simple(self):\n a = [[1, 2], [3, 4]]\n a_det = det(a)\n assert_almost_equal(a_det, -2.0)\n\n def test_simple_complex(self):\n a = [[1, 2], [3, 4j]]\n a_det = det(a)\n assert_almost_equal(a_det, -6+4j)\n\n def test_random(self):\n basic_det = linalg.det\n n = 20\n for i in range(4):\n a = random([n, n])\n d1 = det(a)\n d2 = basic_det(a)\n assert_almost_equal(d1, d2)\n\n def test_random_complex(self):\n basic_det = linalg.det\n n = 20\n for i in range(4):\n a = 
random([n, n]) + 2j*random([n, n])\n d1 = det(a)\n d2 = basic_det(a)\n assert_allclose(d1, d2, rtol=1e-13)\n\n def test_check_finite(self):\n a = [[1, 2], [3, 4]]\n a_det = det(a, check_finite=False)\n assert_almost_equal(a_det, -2.0)\n\n\ndef direct_lstsq(a, b, cmplx=0):\n at = transpose(a)\n if cmplx:\n at = conjugate(at)\n a1 = dot(at, a)\n b1 = dot(at, b)\n return solve(a1, b1)\n\n\nclass TestLstsq(object):\n\n lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)\n\n def setup_method(self):\n np.random.seed(1234)\n\n def test_simple_exact(self):\n for dtype in REAL_DTYPES:\n a = np.array([[1, 20], [-30, 4]], dtype=dtype)\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n for bt in (((1, 0), (0, 1)), (1, 0),\n ((2, 1), (-30, 4))):\n # Store values in case they are overwritten\n # later\n a1 = a.copy()\n b = np.array(bt, dtype=dtype)\n b1 = b.copy()\n try:\n out = lstsq(a1, b1,\n lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n\n x = out[0]\n r = out[2]\n assert_(r == 2,\n 'expected efficient rank 2, got %s' % r)\n assert_allclose(\n dot(a, x), b,\n atol=25 * _eps_cast(a1.dtype),\n rtol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_simple_overdet(self):\n for dtype in REAL_DTYPES:\n a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)\n b = np.array([1, 2, 3], dtype=dtype)\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1, lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n\n x = out[0]\n if lapack_driver == 'gelsy':\n residuals = np.sum((b - a.dot(x))**2)\n else:\n residuals = out[1]\n r = out[2]\n assert_(r == 2, 'expected efficient rank 2, got %s' % r)\n assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),\n residuals,\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n assert_allclose(x, (-0.428571428571429, 0.85714285714285),\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_simple_overdet_complex(self):\n for dtype in COMPLEX_DTYPES:\n a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)\n b = np.array([1, 2+4j, 3], dtype=dtype)\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1, lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n\n x = out[0]\n if lapack_driver == 'gelsy':\n res = b - a.dot(x)\n residuals = np.sum(res * res.conj())\n else:\n residuals = out[1]\n r = out[2]\n assert_(r == 2, 'expected efficient rank 2, got %s' % r)\n assert_allclose(abs((dot(a, x) 
- b)**2).sum(axis=0),\n residuals,\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n assert_allclose(\n x, (-0.4831460674157303 + 0.258426966292135j,\n 0.921348314606741 + 0.292134831460674j),\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_simple_underdet(self):\n for dtype in REAL_DTYPES:\n a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)\n b = np.array([1, 2], dtype=dtype)\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1, lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n\n x = out[0]\n r = out[2]\n assert_(r == 2, 'expected efficient rank 2, got %s' % r)\n assert_allclose(x, (-0.055555555555555, 0.111111111111111,\n 0.277777777777777),\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_random_exact(self):\n for dtype in REAL_DTYPES:\n for n in (20, 200):\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n a = np.asarray(random([n, n]), dtype=dtype)\n for i in range(n):\n a[i, i] = 20 * (0.1 + a[i, i])\n for i in range(4):\n b = np.asarray(random([n, 3]), dtype=dtype)\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1,\n lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n x = out[0]\n r = out[2]\n assert_(r == n, 'expected efficient rank %s, '\n 'got %s' % (n, r))\n if dtype is np.float32:\n assert_allclose(\n dot(a, x), b,\n rtol=500 * _eps_cast(a1.dtype),\n atol=500 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n else:\n assert_allclose(\n dot(a, x), b,\n rtol=1000 * _eps_cast(a1.dtype),\n atol=1000 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_random_complex_exact(self):\n for dtype in COMPLEX_DTYPES:\n for n in (20, 200):\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n a = np.asarray(random([n, n]) + 1j*random([n, n]),\n dtype=dtype)\n for i in range(n):\n a[i, i] = 20 * (0.1 + a[i, i])\n for i in range(2):\n b = np.asarray(random([n, 3]), dtype=dtype)\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n out = lstsq(a1, b1, lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n x = out[0]\n r = out[2]\n assert_(r == n, 'expected efficient rank %s, '\n 'got %s' % (n, r))\n if dtype is np.complex64:\n assert_allclose(\n dot(a, x), b,\n rtol=400 * _eps_cast(a1.dtype),\n atol=400 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n else:\n assert_allclose(\n dot(a, x), b,\n rtol=1000 * _eps_cast(a1.dtype),\n atol=1000 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_random_overdet(self):\n for dtype in REAL_DTYPES:\n for (n, m) in ((20, 15), (200, 2)):\n for lapack_driver 
in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n a = np.asarray(random([n, m]), dtype=dtype)\n for i in range(m):\n a[i, i] = 20 * (0.1 + a[i, i])\n for i in range(4):\n b = np.asarray(random([n, 3]), dtype=dtype)\n # Store values in case they are overwritten later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1,\n lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = ('LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed, skip to the next iteration\n continue\n\n x = out[0]\n r = out[2]\n assert_(r == m, 'expected efficient rank %s, '\n 'got %s' % (m, r))\n assert_allclose(\n x, direct_lstsq(a, b, cmplx=0),\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_random_complex_overdet(self):\n for dtype in COMPLEX_DTYPES:\n for (n, m) in ((20, 15), (200, 2)):\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n a = np.asarray(random([n, m]) + 1j*random([n, m]),\n dtype=dtype)\n for i in range(m):\n a[i, i] = 20 * (0.1 + a[i, i])\n for i in range(2):\n b = np.asarray(random([n, 3]), dtype=dtype)\n # Store values in case they are overwritten\n # later\n a1 = a.copy()\n b1 = b.copy()\n out = lstsq(a1, b1,\n lapack_driver=lapack_driver,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n x = out[0]\n r = out[2]\n assert_(r == m, 'expected efficient rank %s, '\n 'got %s' % (m, r))\n assert_allclose(\n x, direct_lstsq(a, b, cmplx=1),\n rtol=25 * _eps_cast(a1.dtype),\n atol=25 * _eps_cast(a1.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_check_finite(self):\n with suppress_warnings() as sup:\n # On (some) OSX this tests triggers a warning (gh-7538)\n sup.filter(RuntimeWarning,\n \"internal gelsd driver lwork query error,.*\"\n \"Falling back to 'gelss' driver.\")\n for dtype in REAL_DTYPES:\n a = np.array(((1, 20), (-30, 4)), dtype=dtype)\n for bt in (((1, 0), (0, 1)), (1, 0),\n ((2, 1), (-30, 4))):\n for lapack_driver in TestLstsq.lapack_drivers:\n for overwrite in (True, False):\n for check_finite in (True, False):\n b = np.array(bt, dtype=dtype)\n # Store values in case they are overwritten\n # later\n a1 = a.copy()\n b1 = b.copy()\n try:\n out = lstsq(a1, b1,\n lapack_driver=lapack_driver,\n check_finite=check_finite,\n overwrite_a=overwrite,\n overwrite_b=overwrite)\n except LstsqLapackError:\n if lapack_driver is None:\n mesg = (\n 'LstsqLapackError raised with '\n 'lapack_driver being None.')\n raise AssertionError(mesg)\n else:\n # can't proceed,\n # skip to the next iteration\n continue\n x = out[0]\n r = out[2]\n assert_(r == 2,\n 'expected efficient rank 2, '\n 'got %s' % r)\n assert_allclose(\n dot(a, x), b,\n rtol=25 * _eps_cast(a.dtype),\n atol=25 * _eps_cast(a.dtype),\n err_msg=\"driver: %s\" % lapack_driver)\n\n def test_zero_size(self):\n for a_shape, b_shape in (((0, 2), (0,)),\n ((0, 4), (0, 2)),\n ((4, 0), (4,)),\n ((4, 0), (4, 2))):\n b = np.ones(b_shape)\n x, residues, rank, s = lstsq(np.zeros(a_shape), b)\n assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))\n residues_should_be = (np.empty((0,)) if a_shape[1]\n else np.linalg.norm(b, axis=0)**2)\n assert_equal(residues, residues_should_be)\n assert_(rank == 0, 'expected rank 0')\n assert_equal(s, np.empty((0,)))\n\n\nclass TestPinv(object):\n\n def test_simple_real(self):\n a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], 
dtype=float)\n a_pinv = pinv(a)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n a_pinv = pinv2(a)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n\n def test_simple_complex(self):\n a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],\n dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],\n dtype=float))\n a_pinv = pinv(a)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n a_pinv = pinv2(a)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n\n def test_simple_singular(self):\n a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)\n a_pinv = pinv(a)\n a_pinv2 = pinv2(a)\n assert_array_almost_equal(a_pinv, a_pinv2)\n\n def test_simple_cols(self):\n a = array([[1, 2, 3], [4, 5, 6]], dtype=float)\n a_pinv = pinv(a)\n a_pinv2 = pinv2(a)\n assert_array_almost_equal(a_pinv, a_pinv2)\n\n def test_simple_rows(self):\n a = array([[1, 2], [3, 4], [5, 6]], dtype=float)\n a_pinv = pinv(a)\n a_pinv2 = pinv2(a)\n assert_array_almost_equal(a_pinv, a_pinv2)\n\n def test_check_finite(self):\n a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])\n a_pinv = pinv(a, check_finite=False)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n a_pinv = pinv2(a, check_finite=False)\n assert_array_almost_equal(dot(a, a_pinv), np.eye(3))\n\n def test_native_list_argument(self):\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n a_pinv = pinv(a)\n a_pinv2 = pinv2(a)\n assert_array_almost_equal(a_pinv, a_pinv2)\n\n\nclass TestPinvSymmetric(object):\n\n def test_simple_real(self):\n a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)\n a = np.dot(a, a.T)\n a_pinv = pinvh(a)\n assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))\n\n def test_nonpositive(self):\n a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)\n a = np.dot(a, a.T)\n u, s, vt = np.linalg.svd(a)\n s[0] *= -1\n a = np.dot(u * s, vt) # a is now symmetric non-positive and singular\n a_pinv = pinv2(a)\n a_pinvh = pinvh(a)\n assert_array_almost_equal(a_pinv, a_pinvh)\n\n def test_simple_complex(self):\n a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],\n dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],\n dtype=float))\n a = np.dot(a, a.conj().T)\n a_pinv = pinvh(a)\n assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))\n\n def test_native_list_argument(self):\n a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)\n a = np.dot(a, a.T)\n a_pinv = pinvh(a.tolist())\n assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))\n\n\nclass TestVectorNorms(object):\n\n def test_types(self):\n for dtype in np.typecodes['AllFloat']:\n x = np.array([1, 2, 3], dtype=dtype)\n tol = max(1e-15, np.finfo(dtype).eps.real * 20)\n assert_allclose(norm(x), np.sqrt(14), rtol=tol)\n assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)\n\n for dtype in np.typecodes['Complex']:\n x = np.array([1j, 2j, 3j], dtype=dtype)\n tol = max(1e-15, np.finfo(dtype).eps.real * 20)\n assert_allclose(norm(x), np.sqrt(14), rtol=tol)\n assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)\n\n def test_overflow(self):\n # unlike numpy's norm, this one is\n # safer on overflow\n a = array([1e20], dtype=float32)\n assert_almost_equal(norm(a), a)\n\n def test_stable(self):\n # more stable than numpy's norm\n a = array([1e4] + [1]*10000, dtype=float32)\n try:\n # snrm in double precision; we obtain the same as for float64\n # -- large atol needed due to varying blas implementations\n assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)\n except AssertionError:\n # snrm implemented in single precision, == np.linalg.norm result\n msg = \": Result should equal 
either 0.0 or 0.5 (depending on \" \\\n \"implementation of snrm2).\"\n assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)\n\n def test_zero_norm(self):\n assert_equal(norm([1, 0, 3], 0), 2)\n assert_equal(norm([1, 2, 3], 0), 3)\n\n def test_axis_kwd(self):\n a = np.array([[[2, 1], [3, 4]]] * 2, 'd')\n assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)\n assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)\n\n @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason=\"\")\n def test_keepdims_kwd(self):\n a = np.array([[[2, 1], [3, 4]]] * 2, 'd')\n b = norm(a, axis=1, keepdims=True)\n assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)\n assert_(b.shape == (2, 1, 2))\n assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)\n\n\nclass TestMatrixNorms(object):\n\n def test_matrix_norms(self):\n # Not all of these are matrix norms in the most technical sense.\n np.random.seed(1234)\n for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4):\n for t in np.single, np.double, np.csingle, np.cdouble, np.int64:\n A = 10 * np.random.randn(n, m).astype(t)\n if np.issubdtype(A.dtype, np.complexfloating):\n A = (A + 10j * np.random.randn(n, m)).astype(t)\n t_high = np.cdouble\n else:\n t_high = np.double\n for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf):\n actual = norm(A, ord=order)\n desired = np.linalg.norm(A, ord=order)\n # SciPy may return higher precision matrix norms.\n # This is a consequence of using LAPACK.\n if not np.allclose(actual, desired):\n desired = np.linalg.norm(A.astype(t_high), ord=order)\n assert_allclose(actual, desired)\n\n def test_axis_kwd(self):\n a = np.array([[[2, 1], [3, 4]]] * 2, 'd')\n b = norm(a, ord=np.inf, axis=(1, 0))\n c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1))\n d = norm(a, ord=1, axis=(0, 1))\n assert_allclose(b, c)\n assert_allclose(c, d)\n assert_allclose(b, d)\n assert_(b.shape == c.shape == d.shape)\n b = norm(a, ord=1, axis=(1, 0))\n c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1))\n d = norm(a, ord=np.inf, axis=(0, 1))\n assert_allclose(b, c)\n assert_allclose(c, d)\n assert_allclose(b, d)\n assert_(b.shape == c.shape == d.shape)\n\n @pytest.mark.skipif(NumpyVersion(np.__version__) < '1.10.0', reason=\"\")\n def test_keepdims_kwd(self):\n a = np.arange(120, dtype='d').reshape(2, 3, 4, 5)\n b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True)\n c = norm(a, ord=1, axis=(0, 1), keepdims=True)\n assert_allclose(b, c)\n assert_(b.shape == c.shape)\n\n\nclass TestOverwrite(object):\n def test_solve(self):\n assert_no_overwrite(solve, [(3, 3), (3,)])\n\n def test_solve_triangular(self):\n assert_no_overwrite(solve_triangular, [(3, 3), (3,)])\n\n def test_solve_banded(self):\n assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),\n [(4, 6), (6,)])\n\n def test_solveh_banded(self):\n assert_no_overwrite(solveh_banded, [(2, 6), (6,)])\n\n def test_inv(self):\n assert_no_overwrite(inv, [(3, 3)])\n\n def test_det(self):\n assert_no_overwrite(det, [(3, 3)])\n\n def test_lstsq(self):\n assert_no_overwrite(lstsq, [(3, 2), (3,)])\n\n def test_pinv(self):\n assert_no_overwrite(pinv, [(3, 3)])\n\n def test_pinv2(self):\n assert_no_overwrite(pinv2, [(3, 3)])\n\n def test_pinvh(self):\n assert_no_overwrite(pinvh, [(3, 3)])\n\n\nclass TestSolveCirculant(object):\n\n def test_basic1(self):\n c = np.array([1, 2, 3, 5])\n b = np.array([1, -1, 1, 0])\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n def test_basic2(self):\n # b is a 2-d matrix.\n c = np.array([1, 2, -3, 
-5])\n b = np.arange(12).reshape(4, 3)\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n def test_basic3(self):\n # b is a 3-d matrix.\n c = np.array([1, 2, -3, -5])\n b = np.arange(24).reshape(4, 3, 2)\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n def test_complex(self):\n # Complex b and c\n c = np.array([1+2j, -3, 4j, 5])\n b = np.arange(8).reshape(4, 2) + 0.5j\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n def test_random_b_and_c(self):\n # Random b and c\n np.random.seed(54321)\n c = np.random.randn(50)\n b = np.random.randn(50)\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n def test_singular(self):\n # c gives a singular circulant matrix.\n c = np.array([1, 1, 0, 0])\n b = np.array([1, 2, 3, 4])\n x = solve_circulant(c, b, singular='lstsq')\n y, res, rnk, s = lstsq(circulant(c), b)\n assert_allclose(x, y)\n assert_raises(LinAlgError, solve_circulant, x, y)\n\n def test_axis_args(self):\n # Test use of caxis, baxis and outaxis.\n\n # c has shape (2, 1, 4)\n c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])\n\n # b has shape (3, 4)\n b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])\n\n x = solve_circulant(c, b, baxis=1)\n assert_equal(x.shape, (4, 2, 3))\n expected = np.empty_like(x)\n expected[:, 0, :] = solve(circulant(c[0]), b.T)\n expected[:, 1, :] = solve(circulant(c[1]), b.T)\n assert_allclose(x, expected)\n\n x = solve_circulant(c, b, baxis=1, outaxis=-1)\n assert_equal(x.shape, (2, 3, 4))\n assert_allclose(np.rollaxis(x, -1), expected)\n\n # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).\n x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)\n assert_equal(x.shape, (4, 2, 3))\n assert_allclose(x, expected)\n\n def test_native_list_arguments(self):\n # Same as test_basic1 using python's native list.\n c = [1, 2, 3, 5]\n b = [1, -1, 1, 0]\n x = solve_circulant(c, b)\n y = solve(circulant(c), b)\n assert_allclose(x, y)\n\n\nclass TestMatrix_Balance(object):\n\n def test_string_arg(self):\n assert_raises(ValueError, matrix_balance, 'Some string for fail')\n\n def test_infnan_arg(self):\n assert_raises(ValueError, matrix_balance,\n np.array([[1, 2], [3, np.inf]]))\n assert_raises(ValueError, matrix_balance,\n np.array([[1, 2], [3, np.nan]]))\n\n def test_scaling(self):\n _, y = matrix_balance(np.array([[1000, 1], [1000, 0]]))\n # Pre/post LAPACK 3.5.0 gives the same result up to an offset\n # since in each case col norm is x1000 greater and\n # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5.\n assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5)\n\n def test_scaling_order(self):\n A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])\n x, y = matrix_balance(A)\n assert_allclose(solve(y, A).dot(y), x)\n\n def test_separate(self):\n _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]),\n separate=1)\n assert_equal(int(np.diff(np.log2(y))), 5)\n assert_allclose(z, np.arange(2))\n\n def test_permutation(self):\n A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))),\n np.ones((3, 3)))\n x, (y, z) = matrix_balance(A, separate=1)\n assert_allclose(y, np.ones_like(y))\n assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2]))\n\n def test_perm_and_scaling(self):\n # Matrix with its diagonal removed\n cases = ( # Case 0\n np.array([[0., 0., 0., 0., 0.000002],\n [0., 0., 0., 0., 0.],\n [2., 2., 0., 0., 0.],\n [2., 2., 0., 0., 0.],\n [0., 0., 0.000002, 0., 0.]]),\n # Case 1 user reported GH-7258\n 
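# (balancing must recover a permutation as well as a scaling here)\n            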
np.array([[-0.5, 0., 0., 0.],\n [0., -1., 0., 0.],\n [1., 0., -0.5, 0.],\n [0., 1., 0., -1.]]),\n # Case 2 user reported GH-7258\n np.array([[-3., 0., 1., 0.],\n [-1., -1., -0., 1.],\n [-3., -0., -0., 0.],\n [-1., -0., 1., -1.]])\n )\n\n for A in cases:\n x, y = matrix_balance(A)\n x, (s, p) = matrix_balance(A, separate=1)\n ip = np.empty_like(p)\n ip[p] = np.arange(A.shape[0])\n assert_allclose(y, np.diag(s)[ip, :])\n assert_allclose(solve(y, A).dot(y), x)\n\n",
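"# Illustrative sketch, not part of the original test suite: how the\n# diagonal-ordered band storage exercised by the solve_banded tests above\n# is built from a dense matrix (ab[u + i - j, j] == a[i, j]).\nimport numpy as np\nfrom scipy.linalg import solve_banded\n\n# Dense tridiagonal system, so l = u = 1.\na = np.array([[2., -1., 0., 0.],\n              [-1., 2., -1., 0.],\n              [0., -1., 2., -1.],\n              [0., 0., -1., 2.]])\n# Row 0: superdiagonal (padded on the left), row 1: main diagonal,\n# row 2: subdiagonal (padded on the right).\nab = np.array([[0., -1., -1., -1.],\n               [2., 2., 2., 2.],\n               [-1., -1., -1., 0.]])\nb = np.array([1., 0., 0., 1.])\nx = solve_banded((1, 1), ab, b)\nassert np.allclose(a.dot(x), b)\n",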
"# -*- coding: utf-8 -*-\n\"\"\"\nTesting that functions from compat work as expected\n\"\"\"\n\nfrom pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,\n lfilter, builtins, iterkeys, itervalues, iteritems,\n next)\n\n\nclass TestBuiltinIterators(object):\n\n @classmethod\n def check_result(cls, actual, expected, lengths):\n for (iter_res, list_res), exp, length in zip(actual, expected,\n lengths):\n assert not isinstance(iter_res, list)\n assert isinstance(list_res, list)\n\n iter_res = list(iter_res)\n\n assert len(list_res) == length\n assert len(iter_res) == length\n assert iter_res == exp\n assert list_res == exp\n\n def test_range(self):\n actual1 = range(10)\n actual2 = lrange(10)\n actual = [actual1, actual2],\n expected = list(builtins.range(10)),\n lengths = 10,\n\n actual1 = range(1, 10, 2)\n actual2 = lrange(1, 10, 2)\n actual += [actual1, actual2],\n lengths += 5,\n expected += list(builtins.range(1, 10, 2)),\n self.check_result(actual, expected, lengths)\n\n def test_map(self):\n func = lambda x, y, z: x + y + z\n lst = [builtins.range(10), builtins.range(10), builtins.range(10)]\n actual1 = map(func, *lst)\n actual2 = lmap(func, *lst)\n actual = [actual1, actual2],\n expected = list(builtins.map(func, *lst)),\n lengths = 10,\n self.check_result(actual, expected, lengths)\n\n def test_filter(self):\n func = lambda x: x\n lst = list(builtins.range(10))\n actual1 = filter(func, lst)\n actual2 = lfilter(func, lst)\n actual = [actual1, actual2],\n lengths = 9,\n expected = list(builtins.filter(func, lst)),\n self.check_result(actual, expected, lengths)\n\n def test_zip(self):\n lst = [builtins.range(10), builtins.range(10), builtins.range(10)]\n actual = [zip(*lst), lzip(*lst)],\n expected = list(builtins.zip(*lst)),\n lengths = 10,\n self.check_result(actual, expected, lengths)\n\n def test_dict_iterators(self):\n assert next(itervalues({1: 2})) == 2\n assert next(iterkeys({1: 2})) == 1\n assert next(iteritems({1: 2})) == (1, 2)\n",
"\"\"\"This module is designed for community supported date conversion functions\"\"\"\nfrom pandas.compat import range, map\nimport numpy as np\nimport pandas._libs.lib as lib\n\n\ndef parse_date_time(date_col, time_col):\n date_col = _maybe_cast(date_col)\n time_col = _maybe_cast(time_col)\n return lib.try_parse_date_and_time(date_col, time_col)\n\n\ndef parse_date_fields(year_col, month_col, day_col):\n year_col = _maybe_cast(year_col)\n month_col = _maybe_cast(month_col)\n day_col = _maybe_cast(day_col)\n return lib.try_parse_year_month_day(year_col, month_col, day_col)\n\n\ndef parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,\n second_col):\n year_col = _maybe_cast(year_col)\n month_col = _maybe_cast(month_col)\n day_col = _maybe_cast(day_col)\n hour_col = _maybe_cast(hour_col)\n minute_col = _maybe_cast(minute_col)\n second_col = _maybe_cast(second_col)\n return lib.try_parse_datetime_components(year_col, month_col, day_col,\n hour_col, minute_col, second_col)\n\n\ndef generic_parser(parse_func, *cols):\n N = _check_columns(cols)\n results = np.empty(N, dtype=object)\n\n for i in range(N):\n args = [c[i] for c in cols]\n results[i] = parse_func(*args)\n\n return results\n\n\ndef _maybe_cast(arr):\n if not arr.dtype.type == np.object_:\n arr = np.array(arr, dtype=object)\n return arr\n\n\ndef _check_columns(cols):\n if not len(cols):\n raise AssertionError(\"There must be at least 1 column\")\n\n head, tail = cols[0], cols[1:]\n\n N = len(head)\n\n for i, n in enumerate(map(len, tail)):\n if n != N:\n raise AssertionError('All columns must have the same length: {0}; '\n 'column {1} has length {2}'.format(N, i, n))\n\n return N\n",
"#\n# Author: Travis Oliphant 2002-2011 with contributions from\n# SciPy Developers 2004-2011\n#\nfrom __future__ import division, print_function, absolute_import\n\nfrom scipy import special\nfrom scipy.special import entr, logsumexp, betaln, gammaln as gamln\nfrom scipy._lib._numpy_compat import broadcast_to\n\nfrom numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh\n\nimport numpy as np\n\nfrom ._distn_infrastructure import (\n rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)\n\n\nclass binom_gen(rv_discrete):\n \"\"\"A binomial discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `binom` is::\n\n binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)\n\n for ``k`` in ``{0, 1,..., n}``.\n\n `binom` takes ``n`` and ``p`` as shape parameters.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, n, p):\n return self._random_state.binomial(n, p, self._size)\n\n def _argcheck(self, n, p):\n self.b = n\n return (n >= 0) & (p >= 0) & (p <= 1)\n\n def _logpmf(self, x, n, p):\n k = floor(x)\n combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))\n return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)\n\n def _pmf(self, x, n, p):\n return exp(self._logpmf(x, n, p))\n\n def _cdf(self, x, n, p):\n k = floor(x)\n vals = special.bdtr(k, n, p)\n return vals\n\n def _sf(self, x, n, p):\n k = floor(x)\n return special.bdtrc(k, n, p)\n\n def _ppf(self, q, n, p):\n vals = ceil(special.bdtrik(q, n, p))\n vals1 = np.maximum(vals - 1, 0)\n temp = special.bdtr(vals1, n, p)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, n, p, moments='mv'):\n q = 1.0 - p\n mu = n * p\n var = n * p * q\n g1, g2 = None, None\n if 's' in moments:\n g1 = (q - p) / sqrt(var)\n if 'k' in moments:\n g2 = (1.0 - 6*p*q) / var\n return mu, var, g1, g2\n\n def _entropy(self, n, p):\n k = np.r_[0:n + 1]\n vals = self._pmf(k, n, p)\n return np.sum(entr(vals), axis=0)\nbinom = binom_gen(name='binom')\n\n\nclass bernoulli_gen(binom_gen):\n \"\"\"A Bernoulli discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `bernoulli` is::\n\n bernoulli.pmf(k) = 1-p if k = 0\n = p if k = 1\n\n for ``k`` in ``{0, 1}``.\n\n `bernoulli` takes ``p`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, p):\n return binom_gen._rvs(self, 1, p)\n\n def _argcheck(self, p):\n return (p >= 0) & (p <= 1)\n\n def _logpmf(self, x, p):\n return binom._logpmf(x, 1, p)\n\n def _pmf(self, x, p):\n return binom._pmf(x, 1, p)\n\n def _cdf(self, x, p):\n return binom._cdf(x, 1, p)\n\n def _sf(self, x, p):\n return binom._sf(x, 1, p)\n\n def _ppf(self, q, p):\n return binom._ppf(q, 1, p)\n\n def _stats(self, p):\n return binom._stats(1, p)\n\n def _entropy(self, p):\n return entr(p) + entr(1-p)\nbernoulli = bernoulli_gen(b=1, name='bernoulli')\n\n\nclass nbinom_gen(rv_discrete):\n \"\"\"A negative binomial discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n Negative binomial distribution describes a sequence of i.i.d. 
Bernoulli \n trials, repeated until a predefined, non-random number of successes occurs.\n\n The probability mass function of the number of failures for `nbinom` is::\n\n nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k\n\n for ``k >= 0``.\n\n `nbinom` takes ``n`` and ``p`` as shape parameters where n is the number of\n successes, whereas p is the probability of a single success.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, n, p):\n return self._random_state.negative_binomial(n, p, self._size)\n\n def _argcheck(self, n, p):\n return (n > 0) & (p >= 0) & (p <= 1)\n\n def _pmf(self, x, n, p):\n return exp(self._logpmf(x, n, p))\n\n def _logpmf(self, x, n, p):\n coeff = gamln(n+x) - gamln(x+1) - gamln(n)\n return coeff + n*log(p) + special.xlog1py(x, -p)\n\n def _cdf(self, x, n, p):\n k = floor(x)\n return special.betainc(n, k+1, p)\n\n def _sf_skip(self, x, n, p):\n # skip because special.nbdtrc doesn't work for 0<n<1\n k = floor(x)\n return special.nbdtrc(k, n, p)\n\n def _ppf(self, q, n, p):\n vals = ceil(special.nbdtrik(q, n, p))\n vals1 = (vals-1).clip(0.0, np.inf)\n temp = self._cdf(vals1, n, p)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, n, p):\n Q = 1.0 / p\n P = Q - 1.0\n mu = n*P\n var = n*P*Q\n g1 = (Q+P)/sqrt(n*P*Q)\n g2 = (1.0 + 6*P*Q) / (n*P*Q)\n return mu, var, g1, g2\nnbinom = nbinom_gen(name='nbinom')\n\n\nclass geom_gen(rv_discrete):\n \"\"\"A geometric discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `geom` is::\n\n geom.pmf(k) = (1-p)**(k-1)*p\n\n for ``k >= 1``.\n\n `geom` takes ``p`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, p):\n return self._random_state.geometric(p, size=self._size)\n\n def _argcheck(self, p):\n return (p <= 1) & (p >= 0)\n\n def _pmf(self, k, p):\n return np.power(1-p, k-1) * p\n\n def _logpmf(self, k, p):\n return special.xlog1py(k - 1, -p) + log(p)\n\n def _cdf(self, x, p):\n k = floor(x)\n return -expm1(log1p(-p)*k)\n\n def _sf(self, x, p):\n return np.exp(self._logsf(x, p))\n\n def _logsf(self, x, p):\n k = floor(x)\n return k*log1p(-p)\n\n def _ppf(self, q, p):\n vals = ceil(log(1.0-q)/log(1-p))\n temp = self._cdf(vals-1, p)\n return np.where((temp >= q) & (vals > 0), vals-1, vals)\n\n def _stats(self, p):\n mu = 1.0/p\n qr = 1.0-p\n var = qr / p / p\n g1 = (2.0-p) / sqrt(qr)\n g2 = np.polyval([1, -6, 6], p)/(1.0-p)\n return mu, var, g1, g2\ngeom = geom_gen(a=1, name='geom', longname=\"A geometric\")\n\n\nclass hypergeom_gen(rv_discrete):\n r\"\"\"A hypergeometric discrete random variable.\n\n The hypergeometric distribution models drawing objects from a bin.\n `M` is the total number of objects, `n` is total number of Type I objects.\n The random variate represents the number of Type I objects in `N` drawn\n without replacement from the total population.\n\n %(before_notes)s\n\n Notes\n -----\n The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not\n universally accepted. See the Examples for a clarification of the\n definitions used here.\n\n The probability mass function is defined as,\n\n .. math:: p(k, M, n, N) = \\frac{\\binom{n}{k} \\binom{M - n}{N - k}}{\\binom{M}{N}}\n\n for :math:`k \\in [\\max(0, N - M + n), \\min(n, N)]`, where the binomial\n coefficients are defined as,\n\n .. math:: \\binom{n}{k} \\equiv \\frac{n!}{k! 
(n - k)!}.\n\n %(after_notes)s\n\n Examples\n --------\n >>> from scipy.stats import hypergeom\n >>> import matplotlib.pyplot as plt\n\n Suppose we have a collection of 20 animals, of which 7 are dogs. Then if\n we want to know the probability of finding a given number of dogs if we\n choose at random 12 of the 20 animals, we can initialize a frozen\n distribution and plot the probability mass function:\n\n >>> [M, n, N] = [20, 7, 12]\n >>> rv = hypergeom(M, n, N)\n >>> x = np.arange(0, n+1)\n >>> pmf_dogs = rv.pmf(x)\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(x, pmf_dogs, 'bo')\n >>> ax.vlines(x, 0, pmf_dogs, lw=2)\n >>> ax.set_xlabel('# of dogs in our group of chosen animals')\n >>> ax.set_ylabel('hypergeom PMF')\n >>> plt.show()\n\n Instead of using a frozen distribution we can also use `hypergeom`\n methods directly. To for example obtain the cumulative distribution\n function, use:\n\n >>> prb = hypergeom.cdf(x, M, n, N)\n\n And to generate random numbers:\n\n >>> R = hypergeom.rvs(M, n, N, size=10)\n\n \"\"\"\n def _rvs(self, M, n, N):\n return self._random_state.hypergeometric(n, M-n, N, size=self._size)\n\n def _argcheck(self, M, n, N):\n cond = (M > 0) & (n >= 0) & (N >= 0)\n cond &= (n <= M) & (N <= M)\n self.a = np.maximum(N-(M-n), 0)\n self.b = np.minimum(n, N)\n return cond\n\n def _logpmf(self, k, M, n, N):\n tot, good = M, n\n bad = tot - good\n return betaln(good+1, 1) + betaln(bad+1,1) + betaln(tot-N+1, N+1)\\\n - betaln(k+1, good-k+1) - betaln(N-k+1,bad-N+k+1)\\\n - betaln(tot+1, 1)\n\n def _pmf(self, k, M, n, N):\n # same as the following but numerically more precise\n # return comb(good, k) * comb(bad, N-k) / comb(tot, N)\n return exp(self._logpmf(k, M, n, N))\n\n def _stats(self, M, n, N):\n # tot, good, sample_size = M, n, N\n # \"wikipedia\".replace('N', 'M').replace('n', 'N').replace('K', 'n')\n M, n, N = 1.*M, 1.*n, 1.*N\n m = M - n\n p = n/M\n mu = N*p\n\n var = m*n*N*(M - N)*1.0/(M*M*(M-1))\n g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))\n\n g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m\n g2 *= (M-1)*M*M\n g2 += 6.*n*N*(M-N)*m*(5.*M-6)\n g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)\n return mu, var, g1, g2\n\n def _entropy(self, M, n, N):\n k = np.r_[N - (M - n):min(n, N) + 1]\n vals = self.pmf(k, M, n, N)\n return np.sum(entr(vals), axis=0)\n\n def _sf(self, k, M, n, N):\n \"\"\"More precise calculation, 1 - cdf doesn't cut it.\"\"\"\n # This for loop is needed because `k` can be an array. If that's the\n # case, the sf() method makes M, n and N arrays of the same shape. We\n # therefore unpack all inputs args, so we can do the manual\n # integration.\n res = []\n for quant, tot, good, draw in zip(k, M, n, N):\n # Manual integration over probability mass function. 
More accurate\n # than integrate.quad.\n k2 = np.arange(quant + 1, draw + 1)\n res.append(np.sum(self._pmf(k2, tot, good, draw)))\n return np.asarray(res)\n \n def _logsf(self, k, M, n, N):\n \"\"\"\n More precise calculation than log(sf)\n \"\"\"\n res = []\n for quant, tot, good, draw in zip(k, M, n, N):\n # Integration over probability mass function using logsumexp\n k2 = np.arange(quant + 1, draw + 1)\n res.append(logsumexp(self._logpmf(k2, tot, good, draw)))\n return np.asarray(res)\nhypergeom = hypergeom_gen(name='hypergeom')\n\n\n# FIXME: Fails _cdfvec\nclass logser_gen(rv_discrete):\n \"\"\"A Logarithmic (Log-Series, Series) discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `logser` is::\n\n logser.pmf(k) = - p**k / (k*log(1-p))\n\n for ``k >= 1``.\n\n `logser` takes ``p`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, p):\n # looks wrong for p>0.5, too few k=1\n # trying to use generic is worse, no k=1 at all\n return self._random_state.logseries(p, size=self._size)\n\n def _argcheck(self, p):\n return (p > 0) & (p < 1)\n\n def _pmf(self, k, p):\n return -np.power(p, k) * 1.0 / k / special.log1p(-p)\n\n def _stats(self, p):\n r = special.log1p(-p)\n mu = p / (p - 1.0) / r\n mu2p = -p / r / (p - 1.0)**2\n var = mu2p - mu*mu\n mu3p = -p / r * (1.0+p) / (1.0 - p)**3\n mu3 = mu3p - 3*mu*mu2p + 2*mu**3\n g1 = mu3 / np.power(var, 1.5)\n\n mu4p = -p / r * (\n 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)\n mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4\n g2 = mu4 / var**2 - 3.0\n return mu, var, g1, g2\nlogser = logser_gen(a=1, name='logser', longname='A logarithmic')\n\n\nclass poisson_gen(rv_discrete):\n \"\"\"A Poisson discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `poisson` is::\n\n poisson.pmf(k) = exp(-mu) * mu**k / k!\n\n for ``k >= 0``.\n\n `poisson` takes ``mu`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n\n # Override rv_discrete._argcheck to allow mu=0.\n def _argcheck(self, mu):\n return mu >= 0\n\n def _rvs(self, mu):\n return self._random_state.poisson(mu, self._size)\n\n def _logpmf(self, k, mu):\n Pk = special.xlogy(k, mu) - gamln(k + 1) - mu\n return Pk\n\n def _pmf(self, k, mu):\n return exp(self._logpmf(k, mu))\n\n def _cdf(self, x, mu):\n k = floor(x)\n return special.pdtr(k, mu)\n\n def _sf(self, x, mu):\n k = floor(x)\n return special.pdtrc(k, mu)\n\n def _ppf(self, q, mu):\n vals = ceil(special.pdtrik(q, mu))\n vals1 = np.maximum(vals - 1, 0)\n temp = special.pdtr(vals1, mu)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, mu):\n var = mu\n tmp = np.asarray(mu)\n mu_nonzero = tmp > 0\n g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)\n g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)\n return mu, var, g1, g2\n\npoisson = poisson_gen(name=\"poisson\", longname='A Poisson')\n\n\nclass planck_gen(rv_discrete):\n \"\"\"A Planck discrete exponential random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `planck` is::\n\n planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)\n\n for ``k*lambda_ >= 0``.\n\n `planck` takes ``lambda_`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _argcheck(self, lambda_):\n self.a = np.where(lambda_ > 0, 0, -np.inf)\n self.b = np.where(lambda_ > 0, np.inf, 0)\n return lambda_ != 0\n\n def _pmf(self, k, lambda_):\n fact = (1-exp(-lambda_))\n return 
fact*exp(-lambda_*k)\n\n def _cdf(self, x, lambda_):\n k = floor(x)\n return 1-exp(-lambda_*(k+1))\n\n def _sf(self, x, lambda_):\n return np.exp(self._logsf(x, lambda_))\n\n def _logsf(self, x, lambda_):\n k = floor(x)\n return -lambda_*(k+1)\n\n def _ppf(self, q, lambda_):\n vals = ceil(-1.0/lambda_ * log1p(-q)-1)\n vals1 = (vals-1).clip(self.a, np.inf)\n temp = self._cdf(vals1, lambda_)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, lambda_):\n mu = 1/(exp(lambda_)-1)\n var = exp(-lambda_)/(expm1(-lambda_))**2\n g1 = 2*cosh(lambda_/2.0)\n g2 = 4+2*cosh(lambda_)\n return mu, var, g1, g2\n\n def _entropy(self, lambda_):\n l = lambda_\n C = (1-exp(-l))\n return l*exp(-l)/C - log(C)\nplanck = planck_gen(name='planck', longname='A discrete exponential ')\n\n\nclass boltzmann_gen(rv_discrete):\n \"\"\"A Boltzmann (Truncated Discrete Exponential) random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `boltzmann` is::\n\n boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))\n\n for ``k = 0,..., N-1``.\n\n `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _pmf(self, k, lambda_, N):\n fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))\n return fact*exp(-lambda_*k)\n\n def _cdf(self, x, lambda_, N):\n k = floor(x)\n return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))\n\n def _ppf(self, q, lambda_, N):\n qnew = q*(1-exp(-lambda_*N))\n vals = ceil(-1.0/lambda_ * log(1-qnew)-1)\n vals1 = (vals-1).clip(0.0, np.inf)\n temp = self._cdf(vals1, lambda_, N)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, lambda_, N):\n z = exp(-lambda_)\n zN = exp(-lambda_*N)\n mu = z/(1.0-z)-N*zN/(1-zN)\n var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2\n trm = (1-zN)/(1-z)\n trm2 = (z*trm**2 - N*N*zN)\n g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)\n g1 = g1 / trm2**(1.5)\n g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)\n g2 = g2 / trm2 / trm2\n return mu, var, g1, g2\nboltzmann = boltzmann_gen(name='boltzmann',\n longname='A truncated discrete exponential ')\n\n\nclass randint_gen(rv_discrete):\n \"\"\"A uniform discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `randint` is::\n\n randint.pmf(k) = 1./(high - low)\n\n for ``k = low, ..., high - 1``.\n\n `randint` takes ``low`` and ``high`` as shape parameters.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _argcheck(self, low, high):\n self.a = low\n self.b = high - 1\n return (high > low)\n\n def _pmf(self, k, low, high):\n p = np.ones_like(k) / (high - low)\n return np.where((k >= low) & (k < high), p, 0.)\n\n def _cdf(self, x, low, high):\n k = floor(x)\n return (k - low + 1.) / (high - low)\n\n def _ppf(self, q, low, high):\n vals = ceil(q * (high - low) + low) - 1\n vals1 = (vals - 1).clip(low, high)\n temp = self._cdf(vals1, low, high)\n return np.where(temp >= q, vals1, vals)\n\n def _stats(self, low, high):\n m2, m1 = np.asarray(high), np.asarray(low)\n mu = (m2 + m1 - 1.0) / 2\n d = m2 - m1\n var = (d*d - 1) / 12.0\n g1 = 0.0\n g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)\n return mu, var, g1, g2\n\n def _rvs(self, low, high):\n \"\"\"An array of *size* random integers >= ``low`` and < ``high``.\"\"\"\n if self._size is not None:\n # Numpy's RandomState.randint() doesn't broadcast its arguments.\n # Use `broadcast_to()` to extend the shapes of low and high\n # up to self._size. 
Then we can use the numpy.vectorize'd\n # randint without needing to pass it a `size` argument.\n low = broadcast_to(low, self._size)\n high = broadcast_to(high, self._size)\n randint = np.vectorize(self._random_state.randint, otypes=[np.int_])\n return randint(low, high)\n\n def _entropy(self, low, high):\n return log(high - low)\n\nrandint = randint_gen(name='randint', longname='A discrete uniform '\n '(random integer)')\n\n\n# FIXME: problems sampling.\nclass zipf_gen(rv_discrete):\n \"\"\"A Zipf discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `zipf` is::\n\n zipf.pmf(k, a) = 1/(zeta(a) * k**a)\n\n for ``k >= 1``.\n\n `zipf` takes ``a`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, a):\n return self._random_state.zipf(a, size=self._size)\n\n def _argcheck(self, a):\n return a > 1\n\n def _pmf(self, k, a):\n Pk = 1.0 / special.zeta(a, 1) / k**a\n return Pk\n\n def _munp(self, n, a):\n return _lazywhere(\n a > n + 1, (a, n),\n lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),\n np.inf)\nzipf = zipf_gen(a=1, name='zipf', longname='A Zipf')\n\n\nclass dlaplace_gen(rv_discrete):\n \"\"\"A Laplacian discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n The probability mass function for `dlaplace` is::\n\n dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))\n\n for ``a > 0``.\n\n `dlaplace` takes ``a`` as shape parameter.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _pmf(self, k, a):\n return tanh(a/2.0) * exp(-a * abs(k))\n\n def _cdf(self, x, a):\n k = floor(x)\n f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)\n f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)\n return _lazywhere(k >= 0, (k, a), f=f, f2=f2)\n\n def _ppf(self, q, a):\n const = 1 + exp(a)\n vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,\n -log((1-q) * const) / a))\n vals1 = vals - 1\n return np.where(self._cdf(vals1, a) >= q, vals1, vals)\n\n def _stats(self, a):\n ea = exp(a)\n mu2 = 2.*ea/(ea-1.)**2\n mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4\n return 0., mu2, 0., mu4/mu2**2 - 3.\n\n def _entropy(self, a):\n return a / sinh(a) - log(tanh(a/2.0))\ndlaplace = dlaplace_gen(a=-np.inf,\n name='dlaplace', longname='A discrete Laplacian')\n\n\nclass skellam_gen(rv_discrete):\n \"\"\"A Skellam discrete random variable.\n\n %(before_notes)s\n\n Notes\n -----\n Probability distribution of the difference of two correlated or\n uncorrelated Poisson random variables.\n\n Let k1 and k2 be two Poisson-distributed r.v. with expected values\n lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with\n parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and\n ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation\n coefficient between k1 and k2. 
If the two Poisson-distributed r.v.\n are independent then ``rho = 0``.\n\n Parameters mu1 and mu2 must be strictly positive.\n\n For details see: http://en.wikipedia.org/wiki/Skellam_distribution\n\n `skellam` takes ``mu1`` and ``mu2`` as shape parameters.\n\n %(after_notes)s\n\n %(example)s\n\n \"\"\"\n def _rvs(self, mu1, mu2):\n n = self._size\n return (self._random_state.poisson(mu1, n) -\n self._random_state.poisson(mu2, n))\n\n def _pmf(self, x, mu1, mu2):\n px = np.where(x < 0,\n _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,\n _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)\n # ncx2.pdf() returns nan's for extremely low probabilities\n return px\n\n def _cdf(self, x, mu1, mu2):\n x = floor(x)\n px = np.where(x < 0,\n _ncx2_cdf(2*mu2, -2*x, 2*mu1),\n 1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))\n return px\n\n def _stats(self, mu1, mu2):\n mean = mu1 - mu2\n var = mu1 + mu2\n g1 = mean / sqrt((var)**3)\n g2 = 1 / var\n return mean, var, g1, g2\nskellam = skellam_gen(a=-np.inf, name=\"skellam\", longname='A Skellam')\n\n\n# Collect names of classes and objects in this module.\npairs = list(globals().items())\n_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)\n\n__all__ = _distn_names + _distn_gen_names\n",
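"# --- Hedged usage sketch, not part of the original scipy module above. ---\n# It exercises planck, boltzmann and skellam through the public scipy.stats\n# namespace to confirm the closed forms coded in their _pmf/_stats methods:\n# boltzmann (the truncated discrete exponential) approaches planck as the\n# truncation point N grows, and skellam's first two moments are\n# mu = mu1 - mu2 and var = mu1 + mu2.\nimport numpy as np\nfrom scipy import stats\n\nlam = 0.7\nk = np.arange(10)\n# planck.pmf(k) = (1 - exp(-lam)) * exp(-lam * k)\nassert np.allclose(stats.planck.pmf(k, lam),\n                   (1 - np.exp(-lam)) * np.exp(-lam * k))\n# with a large truncation point, boltzmann is numerically planck\nassert np.allclose(stats.boltzmann.pmf(k, lam, 1000), stats.planck.pmf(k, lam))\nmu1, mu2 = 3.0, 1.5\nmean, var = stats.skellam.stats(mu1, mu2, moments='mv')\nassert np.isclose(mean, mu1 - mu2) and np.isclose(var, mu1 + mu2)\n",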
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\n\nfrom warnings import catch_warnings\nimport re\nimport operator\nimport pytest\n\nfrom numpy.random import randn\n\nimport numpy as np\n\nfrom pandas.core.api import DataFrame, Panel\nfrom pandas.core.computation import expressions as expr\nfrom pandas import compat, _np_version_under1p11, _np_version_under1p13\nfrom pandas.util.testing import (assert_almost_equal, assert_series_equal,\n assert_frame_equal, assert_panel_equal,\n assert_panel4d_equal, slow)\nfrom pandas.io.formats.printing import pprint_thing\nimport pandas.util.testing as tm\n\n\n_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')\n_frame2 = DataFrame(randn(100, 4), columns=list('ABCD'), dtype='float64')\n_mixed = DataFrame({'A': _frame['A'].copy(),\n 'B': _frame['B'].astype('float32'),\n 'C': _frame['C'].astype('int64'),\n 'D': _frame['D'].astype('int32')})\n_mixed2 = DataFrame({'A': _frame2['A'].copy(),\n 'B': _frame2['B'].astype('float32'),\n 'C': _frame2['C'].astype('int64'),\n 'D': _frame2['D'].astype('int32')})\n_integer = DataFrame(\n np.random.randint(1, 100,\n size=(10001, 4)),\n columns=list('ABCD'), dtype='int64')\n_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),\n columns=list('ABCD'), dtype='int64')\n\nwith catch_warnings(record=True):\n _frame_panel = Panel(dict(ItemA=_frame.copy(),\n ItemB=(_frame.copy() + 3),\n ItemC=_frame.copy(),\n ItemD=_frame.copy()))\n _frame2_panel = Panel(dict(ItemA=_frame2.copy(),\n ItemB=(_frame2.copy() + 3),\n ItemC=_frame2.copy(),\n ItemD=_frame2.copy()))\n _integer_panel = Panel(dict(ItemA=_integer,\n ItemB=(_integer + 34).astype('int64')))\n _integer2_panel = Panel(dict(ItemA=_integer2,\n ItemB=(_integer2 + 34).astype('int64')))\n _mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))\n _mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))\n\n\[email protected](not expr._USE_NUMEXPR, reason='not using numexpr')\nclass TestExpressions(object):\n\n def setup_method(self, method):\n\n self.frame = _frame.copy()\n self.frame2 = _frame2.copy()\n self.mixed = _mixed.copy()\n self.mixed2 = _mixed2.copy()\n self.integer = _integer.copy()\n self._MIN_ELEMENTS = expr._MIN_ELEMENTS\n\n def teardown_method(self, method):\n expr._MIN_ELEMENTS = self._MIN_ELEMENTS\n\n def run_arithmetic(self, df, other, assert_func, check_dtype=False,\n test_flex=True):\n expr._MIN_ELEMENTS = 0\n operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']\n if not compat.PY3:\n operations.append('div')\n for arith in operations:\n\n # numpy >= 1.11 doesn't handle integers\n # raised to integer powers\n # https://github.com/pandas-dev/pandas/issues/15363\n if arith == 'pow' and not _np_version_under1p11:\n continue\n\n operator_name = arith\n if arith == 'div':\n operator_name = 'truediv'\n\n if test_flex:\n op = lambda x, y: getattr(df, arith)(y)\n op.__name__ = arith\n else:\n op = getattr(operator, operator_name)\n expr.set_use_numexpr(False)\n expected = op(df, other)\n expr.set_use_numexpr(True)\n\n result = op(df, other)\n try:\n if check_dtype:\n if arith == 'truediv':\n assert expected.dtype.kind == 'f'\n assert_func(expected, result)\n except Exception:\n pprint_thing(\"Failed test with operator %r\" % op.__name__)\n raise\n\n def test_integer_arithmetic(self):\n self.run_arithmetic(self.integer, self.integer,\n assert_frame_equal)\n self.run_arithmetic(self.integer.iloc[:, 0],\n self.integer.iloc[:, 0], assert_series_equal,\n 
check_dtype=True)\n\n def run_binary(self, df, other, assert_func, test_flex=False,\n numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])):\n \"\"\"\n tests solely that the result is the same whether or not numexpr is\n enabled. Need to test whether the function does the correct thing\n elsewhere.\n \"\"\"\n expr._MIN_ELEMENTS = 0\n expr.set_test_mode(True)\n operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']\n for arith in operations:\n if test_flex:\n op = lambda x, y: getattr(df, arith)(y)\n op.__name__ = arith\n else:\n op = getattr(operator, arith)\n expr.set_use_numexpr(False)\n expected = op(df, other)\n expr.set_use_numexpr(True)\n expr.get_test_result()\n result = op(df, other)\n used_numexpr = expr.get_test_result()\n try:\n if arith in numexpr_ops:\n assert used_numexpr, \"Did not use numexpr as expected.\"\n else:\n assert not used_numexpr, \"Used numexpr unexpectedly.\"\n assert_func(expected, result)\n except Exception:\n pprint_thing(\"Failed test with operation %r\" % arith)\n pprint_thing(\"test_flex was %r\" % test_flex)\n raise\n\n def run_frame(self, df, other, binary_comp=None, run_binary=True,\n **kwargs):\n self.run_arithmetic(df, other, assert_frame_equal,\n test_flex=False, **kwargs)\n self.run_arithmetic(df, other, assert_frame_equal, test_flex=True,\n **kwargs)\n if run_binary:\n if binary_comp is None:\n expr.set_use_numexpr(False)\n binary_comp = other + 1\n expr.set_use_numexpr(True)\n self.run_binary(df, binary_comp, assert_frame_equal,\n test_flex=False, **kwargs)\n self.run_binary(df, binary_comp, assert_frame_equal,\n test_flex=True, **kwargs)\n\n def run_series(self, ser, other, binary_comp=None, **kwargs):\n self.run_arithmetic(ser, other, assert_series_equal,\n test_flex=False, **kwargs)\n self.run_arithmetic(ser, other, assert_almost_equal,\n test_flex=True, **kwargs)\n # series doesn't uses vec_compare instead of numexpr...\n # if binary_comp is None:\n # binary_comp = other + 1\n # self.run_binary(ser, binary_comp, assert_frame_equal,\n # test_flex=False, **kwargs)\n # self.run_binary(ser, binary_comp, assert_frame_equal,\n # test_flex=True, **kwargs)\n\n def run_panel(self, panel, other, binary_comp=None, run_binary=True,\n assert_func=assert_panel_equal, **kwargs):\n self.run_arithmetic(panel, other, assert_func, test_flex=False,\n **kwargs)\n self.run_arithmetic(panel, other, assert_func, test_flex=True,\n **kwargs)\n if run_binary:\n if binary_comp is None:\n binary_comp = other + 1\n self.run_binary(panel, binary_comp, assert_func,\n test_flex=False, **kwargs)\n self.run_binary(panel, binary_comp, assert_func,\n test_flex=True, **kwargs)\n\n def test_integer_arithmetic_frame(self):\n self.run_frame(self.integer, self.integer)\n\n def test_integer_arithmetic_series(self):\n self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])\n\n @slow\n def test_integer_panel(self):\n self.run_panel(_integer2_panel, np.random.randint(1, 100))\n\n def test_float_arithemtic_frame(self):\n self.run_frame(self.frame2, self.frame2)\n\n def test_float_arithmetic_series(self):\n self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])\n\n @slow\n def test_float_panel(self):\n self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)\n\n @slow\n def test_panel4d(self):\n with catch_warnings(record=True):\n self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,\n assert_func=assert_panel4d_equal, binary_comp=3)\n\n def test_mixed_arithmetic_frame(self):\n # TODO: FIGURE OUT HOW TO GET IT TO WORK...\n # can't do arithmetic because 
comparison methods try to do *entire*\n # frame instead of by-column\n self.run_frame(self.mixed2, self.mixed2, run_binary=False)\n\n def test_mixed_arithmetic_series(self):\n for col in self.mixed2.columns:\n self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)\n\n @slow\n def test_mixed_panel(self):\n self.run_panel(_mixed2_panel, np.random.randint(1, 100),\n binary_comp=-2)\n\n def test_float_arithemtic(self):\n self.run_arithmetic(self.frame, self.frame, assert_frame_equal)\n self.run_arithmetic(self.frame.iloc[:, 0], self.frame.iloc[:, 0],\n assert_series_equal, check_dtype=True)\n\n def test_mixed_arithmetic(self):\n self.run_arithmetic(self.mixed, self.mixed, assert_frame_equal)\n for col in self.mixed.columns:\n self.run_arithmetic(self.mixed[col], self.mixed[col],\n assert_series_equal)\n\n def test_integer_with_zeros(self):\n self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))\n self.run_arithmetic(self.integer, self.integer,\n assert_frame_equal)\n self.run_arithmetic(self.integer.iloc[:, 0],\n self.integer.iloc[:, 0], assert_series_equal)\n\n def test_invalid(self):\n\n # no op\n result = expr._can_use_numexpr(operator.add, None, self.frame,\n self.frame, 'evaluate')\n assert not result\n\n # mixed\n result = expr._can_use_numexpr(operator.add, '+', self.mixed,\n self.frame, 'evaluate')\n assert not result\n\n # min elements\n result = expr._can_use_numexpr(operator.add, '+', self.frame2,\n self.frame2, 'evaluate')\n assert not result\n\n # ok, we only check on first part of expression\n result = expr._can_use_numexpr(operator.add, '+', self.frame,\n self.frame2, 'evaluate')\n assert result\n\n def test_binary_ops(self):\n def testit():\n\n for f, f2 in [(self.frame, self.frame2),\n (self.mixed, self.mixed2)]:\n\n for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),\n ('div', '/'), ('pow', '**')]:\n\n # numpy >= 1.11 doesn't handle integers\n # raised to integer powers\n # https://github.com/pandas-dev/pandas/issues/15363\n if op == 'pow' and not _np_version_under1p11:\n continue\n\n if op == 'div':\n op = getattr(operator, 'truediv', None)\n else:\n op = getattr(operator, op, None)\n if op is not None:\n result = expr._can_use_numexpr(op, op_str, f, f,\n 'evaluate')\n assert result != f._is_mixed_type\n\n result = expr.evaluate(op, op_str, f, f,\n use_numexpr=True)\n expected = expr.evaluate(op, op_str, f, f,\n use_numexpr=False)\n\n if isinstance(result, DataFrame):\n tm.assert_frame_equal(result, expected)\n else:\n tm.assert_numpy_array_equal(result,\n expected.values)\n\n result = expr._can_use_numexpr(op, op_str, f2, f2,\n 'evaluate')\n assert not result\n\n expr.set_use_numexpr(False)\n testit()\n expr.set_use_numexpr(True)\n expr.set_numexpr_threads(1)\n testit()\n expr.set_numexpr_threads()\n testit()\n\n def test_boolean_ops(self):\n def testit():\n for f, f2 in [(self.frame, self.frame2),\n (self.mixed, self.mixed2)]:\n\n f11 = f\n f12 = f + 1\n\n f21 = f2\n f22 = f2 + 1\n\n for op, op_str in [('gt', '>'), ('lt', '<'), ('ge', '>='),\n ('le', '<='), ('eq', '=='), ('ne', '!=')]:\n\n op = getattr(operator, op)\n\n result = expr._can_use_numexpr(op, op_str, f11, f12,\n 'evaluate')\n assert result != f11._is_mixed_type\n\n result = expr.evaluate(op, op_str, f11, f12,\n use_numexpr=True)\n expected = expr.evaluate(op, op_str, f11, f12,\n use_numexpr=False)\n if isinstance(result, DataFrame):\n tm.assert_frame_equal(result, expected)\n else:\n tm.assert_numpy_array_equal(result, expected.values)\n\n result = expr._can_use_numexpr(op, 
op_str, f21, f22,\n 'evaluate')\n assert not result\n\n expr.set_use_numexpr(False)\n testit()\n expr.set_use_numexpr(True)\n expr.set_numexpr_threads(1)\n testit()\n expr.set_numexpr_threads()\n testit()\n\n def test_where(self):\n def testit():\n for f in [self.frame, self.frame2, self.mixed, self.mixed2]:\n\n for cond in [True, False]:\n\n c = np.empty(f.shape, dtype=np.bool_)\n c.fill(cond)\n result = expr.where(c, f.values, f.values + 1)\n expected = np.where(c, f.values, f.values + 1)\n tm.assert_numpy_array_equal(result, expected)\n\n expr.set_use_numexpr(False)\n testit()\n expr.set_use_numexpr(True)\n expr.set_numexpr_threads(1)\n testit()\n expr.set_numexpr_threads()\n testit()\n\n def test_bool_ops_raise_on_arithmetic(self):\n df = DataFrame({'a': np.random.rand(10) > 0.5,\n 'b': np.random.rand(10) > 0.5})\n names = 'div', 'truediv', 'floordiv', 'pow'\n ops = '/', '/', '//', '**'\n msg = 'operator %r not implemented for bool dtypes'\n for op, name in zip(ops, names):\n if not compat.PY3 or name != 'div':\n f = getattr(operator, name)\n err_msg = re.escape(msg % op)\n\n with tm.assert_raises_regex(NotImplementedError, err_msg):\n f(df, df)\n\n with tm.assert_raises_regex(NotImplementedError, err_msg):\n f(df.a, df.b)\n\n with tm.assert_raises_regex(NotImplementedError, err_msg):\n f(df.a, True)\n\n with tm.assert_raises_regex(NotImplementedError, err_msg):\n f(False, df.a)\n\n with tm.assert_raises_regex(TypeError, err_msg):\n f(False, df)\n\n with tm.assert_raises_regex(TypeError, err_msg):\n f(df, True)\n\n def test_bool_ops_warn_on_arithmetic(self):\n n = 10\n df = DataFrame({'a': np.random.rand(n) > 0.5,\n 'b': np.random.rand(n) > 0.5})\n names = 'add', 'mul', 'sub'\n ops = '+', '*', '-'\n subs = {'+': '|', '*': '&', '-': '^'}\n sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}\n for op, name in zip(ops, names):\n f = getattr(operator, name)\n fe = getattr(operator, sub_funcs[subs[op]])\n\n # >= 1.13.0 these are now TypeErrors\n if op == '-' and not _np_version_under1p13:\n continue\n\n with tm.use_numexpr(True, min_elements=5):\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(df, df)\n e = fe(df, df)\n tm.assert_frame_equal(r, e)\n\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(df.a, df.b)\n e = fe(df.a, df.b)\n tm.assert_series_equal(r, e)\n\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(df.a, True)\n e = fe(df.a, True)\n tm.assert_series_equal(r, e)\n\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(False, df.a)\n e = fe(False, df.a)\n tm.assert_series_equal(r, e)\n\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(False, df)\n e = fe(False, df)\n tm.assert_frame_equal(r, e)\n\n with tm.assert_produces_warning(check_stacklevel=False):\n r = f(df, True)\n e = fe(df, True)\n tm.assert_frame_equal(r, e)\n",
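"# --- Hedged sketch, not part of the pandas test module above. ---\n# It shows the core pattern those tests are built on: evaluate the same\n# expression with the numexpr backend disabled and then enabled, and compare\n# the results. expr.set_use_numexpr() is the same pandas internal the tests\n# toggle; the frame itself is illustrative. If numexpr is not installed the\n# toggle is a no-op and the two results are trivially equal.\nimport numpy as np\nfrom pandas import DataFrame\nfrom pandas.core.computation import expressions as expr\nfrom pandas.util.testing import assert_frame_equal\n\ndf = DataFrame(np.random.randn(20000, 4), columns=list('ABCD'))\nexpr.set_use_numexpr(False)\nplain = df + df           # plain NumPy evaluation\nexpr.set_use_numexpr(True)\naccelerated = df + df     # large enough to be routed through numexpr\nassert_frame_equal(plain, accelerated)\n",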
"from __future__ import division, print_function, absolute_import\n\nfrom . import _nnls\nfrom numpy import asarray_chkfinite, zeros, double\n\n__all__ = ['nnls']\n\n\ndef nnls(A, b):\n \"\"\"\n Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper\n for a FORTRAN non-negative least squares solver.\n\n Parameters\n ----------\n A : ndarray\n Matrix ``A`` as shown above.\n b : ndarray\n Right-hand side vector.\n\n Returns\n -------\n x : ndarray\n Solution vector.\n rnorm : float\n The residual, ``|| Ax-b ||_2``.\n\n Notes\n -----\n The FORTRAN code was published in the book below. The algorithm\n is an active set method. It solves the KKT (Karush-Kuhn-Tucker)\n conditions for the non-negative least squares problem.\n\n References\n ----------\n Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM\n\n \"\"\"\n\n A, b = map(asarray_chkfinite, (A, b))\n\n if len(A.shape) != 2:\n raise ValueError(\"expected matrix\")\n if len(b.shape) != 1:\n raise ValueError(\"expected vector\")\n\n m, n = A.shape\n\n if m != b.shape[0]:\n raise ValueError(\"incompatible dimensions\")\n\n w = zeros((n,), dtype=double)\n zz = zeros((m,), dtype=double)\n index = zeros((n,), dtype=int)\n\n x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index)\n if mode != 1:\n raise RuntimeError(\"too many iterations\")\n\n return x, rnorm\n",
"# -*- coding: latin-1 -*-\n''' Nose test generators\n\nNeed function load / save / roundtrip tests\n\n'''\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nfrom os.path import join as pjoin, dirname\nfrom glob import glob\nfrom io import BytesIO\nfrom tempfile import mkdtemp\n\nfrom scipy._lib.six import u, text_type, string_types\n\nimport warnings\nimport shutil\nimport gzip\n\nfrom numpy.testing import (assert_array_equal, assert_array_almost_equal,\n assert_equal, assert_)\nfrom pytest import raises as assert_raises\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nimport numpy as np\nfrom numpy import array\nimport scipy.sparse as SP\n\nimport scipy.io.matlab.byteordercodes as boc\nfrom scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError\nfrom scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)\nfrom scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,\n MatlabFunction, varmats_from_mat,\n to_writeable, EmptyStructMarker)\nfrom scipy.io.matlab import mio5_params as mio5p\n\ntest_data_path = pjoin(dirname(__file__), 'data')\n\n\ndef mlarr(*args, **kwargs):\n \"\"\"Convenience function to return matlab-compatible 2D array.\"\"\"\n arr = np.array(*args, **kwargs)\n arr.shape = matdims(arr)\n return arr\n\n# Define cases to test\ntheta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)\ncase_table4 = [\n {'name': 'double',\n 'classes': {'testdouble': 'double'},\n 'expected': {'testdouble': theta}\n }]\ncase_table4.append(\n {'name': 'string',\n 'classes': {'teststring': 'char'},\n 'expected': {'teststring':\n array([u('\"Do nine men interpret?\" \"Nine men,\" I nod.')])}\n })\ncase_table4.append(\n {'name': 'complex',\n 'classes': {'testcomplex': 'double'},\n 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}\n })\nA = np.zeros((3,5))\nA[0] = list(range(1,6))\nA[:,0] = list(range(1,4))\ncase_table4.append(\n {'name': 'matrix',\n 'classes': {'testmatrix': 'double'},\n 'expected': {'testmatrix': A},\n })\ncase_table4.append(\n {'name': 'sparse',\n 'classes': {'testsparse': 'sparse'},\n 'expected': {'testsparse': SP.coo_matrix(A)},\n })\nB = A.astype(complex)\nB[0,0] += 1j\ncase_table4.append(\n {'name': 'sparsecomplex',\n 'classes': {'testsparsecomplex': 'sparse'},\n 'expected': {'testsparsecomplex': SP.coo_matrix(B)},\n })\ncase_table4.append(\n {'name': 'multi',\n 'classes': {'theta': 'double', 'a': 'double'},\n 'expected': {'theta': theta, 'a': A},\n })\ncase_table4.append(\n {'name': 'minus',\n 'classes': {'testminus': 'double'},\n 'expected': {'testminus': mlarr(-1)},\n })\ncase_table4.append(\n {'name': 'onechar',\n 'classes': {'testonechar': 'char'},\n 'expected': {'testonechar': array([u('r')])},\n })\n# Cell arrays stored as object arrays\nCA = mlarr(( # tuple for object array creation\n [],\n mlarr([1]),\n mlarr([[1,2]]),\n mlarr([[1,2,3]])), dtype=object).reshape(1,-1)\nCA[0,0] = array(\n [u('This cell contains this string and 3 arrays of increasing length')])\ncase_table5 = [\n {'name': 'cell',\n 'classes': {'testcell': 'cell'},\n 'expected': {'testcell': CA}}]\nCAE = mlarr(( # tuple for object array creation\n mlarr(1),\n mlarr(2),\n mlarr([]),\n mlarr([]),\n mlarr(3)), dtype=object).reshape(1,-1)\nobjarr = np.empty((1,1),dtype=object)\nobjarr[0,0] = mlarr(1)\ncase_table5.append(\n {'name': 'scalarcell',\n 'classes': {'testscalarcell': 'cell'},\n 'expected': {'testscalarcell': objarr}\n })\ncase_table5.append(\n {'name': 'emptycell',\n 'classes': {'testemptycell': 
'cell'},\n 'expected': {'testemptycell': CAE}})\ncase_table5.append(\n {'name': 'stringarray',\n 'classes': {'teststringarray': 'char'},\n 'expected': {'teststringarray': array(\n [u('one '), u('two '), u('three')])},\n })\ncase_table5.append(\n {'name': '3dmatrix',\n 'classes': {'test3dmatrix': 'double'},\n 'expected': {\n 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}\n })\nst_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)\ndtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]\nst1 = np.zeros((1,1), dtype)\nst1['stringfield'][0,0] = array([u('Rats live on no evil star.')])\nst1['doublefield'][0,0] = st_sub_arr\nst1['complexfield'][0,0] = st_sub_arr * (1 + 1j)\ncase_table5.append(\n {'name': 'struct',\n 'classes': {'teststruct': 'struct'},\n 'expected': {'teststruct': st1}\n })\nCN = np.zeros((1,2), dtype=object)\nCN[0,0] = mlarr(1)\nCN[0,1] = np.zeros((1,3), dtype=object)\nCN[0,1][0,0] = mlarr(2, dtype=np.uint8)\nCN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)\nCN[0,1][0,2] = np.zeros((1,2), dtype=object)\nCN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)\nCN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)\ncase_table5.append(\n {'name': 'cellnest',\n 'classes': {'testcellnest': 'cell'},\n 'expected': {'testcellnest': CN},\n })\nst2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])\nst2[0,0]['one'] = mlarr(1)\nst2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])\nst2[0,0]['two'][0,0]['three'] = array([u('number 3')])\ncase_table5.append(\n {'name': 'structnest',\n 'classes': {'teststructnest': 'struct'},\n 'expected': {'teststructnest': st2}\n })\na = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])\na[0,0]['one'] = mlarr(1)\na[0,0]['two'] = mlarr(2)\na[0,1]['one'] = array([u('number 1')])\na[0,1]['two'] = array([u('number 2')])\ncase_table5.append(\n {'name': 'structarr',\n 'classes': {'teststructarr': 'struct'},\n 'expected': {'teststructarr': a}\n })\nODT = np.dtype([(n, object) for n in\n ['expr', 'inputExpr', 'args',\n 'isEmpty', 'numArgs', 'version']])\nMO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')\nm0 = MO[0,0]\nm0['expr'] = array([u('x')])\nm0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')])\nm0['args'] = array([u('x')])\nm0['isEmpty'] = mlarr(0)\nm0['numArgs'] = mlarr(1)\nm0['version'] = mlarr(1)\ncase_table5.append(\n {'name': 'object',\n 'classes': {'testobject': 'object'},\n 'expected': {'testobject': MO}\n })\nfp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')\nu_str = fp_u_str.read().decode('utf-8')\nfp_u_str.close()\ncase_table5.append(\n {'name': 'unicode',\n 'classes': {'testunicode': 'char'},\n 'expected': {'testunicode': array([u_str])}\n })\ncase_table5.append(\n {'name': 'sparse',\n 'classes': {'testsparse': 'sparse'},\n 'expected': {'testsparse': SP.coo_matrix(A)},\n })\ncase_table5.append(\n {'name': 'sparsecomplex',\n 'classes': {'testsparsecomplex': 'sparse'},\n 'expected': {'testsparsecomplex': SP.coo_matrix(B)},\n })\ncase_table5.append(\n {'name': 'bool',\n 'classes': {'testbools': 'logical'},\n 'expected': {'testbools':\n array([[True], [False]])},\n })\n\ncase_table5_rt = case_table5[:]\n# Inline functions can't be concatenated in matlab, so RT only\ncase_table5_rt.append(\n {'name': 'objectarray',\n 'classes': {'testobjectarray': 'object'},\n 'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})\n\n\ndef types_compatible(var1, var2):\n \"\"\"Check if types are same or compatible.\n\n 0-D numpy scalars are compatible with bare python 
scalars.\n \"\"\"\n type1 = type(var1)\n type2 = type(var2)\n if type1 is type2:\n return True\n if type1 is np.ndarray and var1.shape == ():\n return type(var1.item()) is type2\n if type2 is np.ndarray and var2.shape == ():\n return type(var2.item()) is type1\n return False\n\n\ndef _check_level(label, expected, actual):\n \"\"\" Check one level of a potentially nested array \"\"\"\n if SP.issparse(expected): # allow different types of sparse matrices\n assert_(SP.issparse(actual))\n assert_array_almost_equal(actual.todense(),\n expected.todense(),\n err_msg=label,\n decimal=5)\n return\n # Check types are as expected\n assert_(types_compatible(expected, actual),\n \"Expected type %s, got %s at %s\" %\n (type(expected), type(actual), label))\n # A field in a record array may not be an ndarray\n # A scalar from a record array will be type np.void\n if not isinstance(expected,\n (np.void, np.ndarray, MatlabObject)):\n assert_equal(expected, actual)\n return\n # This is an ndarray-like thing\n assert_(expected.shape == actual.shape,\n msg='Expected shape %s, got %s at %s' % (expected.shape,\n actual.shape,\n label))\n ex_dtype = expected.dtype\n if ex_dtype.hasobject: # array of objects\n if isinstance(expected, MatlabObject):\n assert_equal(expected.classname, actual.classname)\n for i, ev in enumerate(expected):\n level_label = \"%s, [%d], \" % (label, i)\n _check_level(level_label, ev, actual[i])\n return\n if ex_dtype.fields: # probably recarray\n for fn in ex_dtype.fields:\n level_label = \"%s, field %s, \" % (label, fn)\n _check_level(level_label,\n expected[fn], actual[fn])\n return\n if ex_dtype.type in (text_type, # string or bool\n np.unicode_,\n np.bool_):\n assert_equal(actual, expected, err_msg=label)\n return\n # Something numeric\n assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)\n\n\ndef _load_check_case(name, files, case):\n for file_name in files:\n matdict = loadmat(file_name, struct_as_record=True)\n label = \"test %s; file %s\" % (name, file_name)\n for k, expected in case.items():\n k_label = \"%s, variable %s\" % (label, k)\n assert_(k in matdict, \"Missing key at %s\" % k_label)\n _check_level(k_label, expected, matdict[k])\n\n\ndef _whos_check_case(name, files, case, classes):\n for file_name in files:\n label = \"test %s; file %s\" % (name, file_name)\n\n whos = whosmat(file_name)\n\n expected_whos = []\n for k, expected in case.items():\n expected_whos.append((k, expected.shape, classes[k]))\n\n whos.sort()\n expected_whos.sort()\n assert_equal(whos, expected_whos,\n \"%s: %r != %r\" % (label, whos, expected_whos)\n )\n\n\n# Round trip tests\ndef _rt_check_case(name, expected, format):\n mat_stream = BytesIO()\n savemat(mat_stream, expected, format=format)\n mat_stream.seek(0)\n _load_check_case(name, [mat_stream], expected)\n\n\n# generator for load tests\ndef test_load():\n for case in case_table4 + case_table5:\n name = case['name']\n expected = case['expected']\n filt = pjoin(test_data_path, 'test%s_*.mat' % name)\n files = glob(filt)\n assert_(len(files) > 0,\n \"No files for test %s using filter %s\" % (name, filt))\n _load_check_case(name, files, expected)\n\n\n# generator for whos tests\ndef test_whos():\n for case in case_table4 + case_table5:\n name = case['name']\n expected = case['expected']\n classes = case['classes']\n filt = pjoin(test_data_path, 'test%s_*.mat' % name)\n files = glob(filt)\n assert_(len(files) > 0,\n \"No files for test %s using filter %s\" % (name, filt))\n _whos_check_case(name, files, expected, classes)\n\n\n# 
generator for round trip tests\ndef test_round_trip():\n for case in case_table4 + case_table5_rt:\n case_table4_names = [case['name'] for case in case_table4]\n name = case['name'] + '_round_trip'\n expected = case['expected']\n for format in (['4', '5'] if case['name'] in case_table4_names else ['5']):\n _rt_check_case(name, expected, format)\n\n\ndef test_gzip_simple():\n xdense = np.zeros((20,20))\n xdense[2,3] = 2.3\n xdense[4,5] = 4.5\n x = SP.csc_matrix(xdense)\n\n name = 'gzip_test'\n expected = {'x':x}\n format = '4'\n\n tmpdir = mkdtemp()\n try:\n fname = pjoin(tmpdir,name)\n mat_stream = gzip.open(fname,mode='wb')\n savemat(mat_stream, expected, format=format)\n mat_stream.close()\n\n mat_stream = gzip.open(fname,mode='rb')\n actual = loadmat(mat_stream, struct_as_record=True)\n mat_stream.close()\n finally:\n shutil.rmtree(tmpdir)\n\n assert_array_almost_equal(actual['x'].todense(),\n expected['x'].todense(),\n err_msg=repr(actual))\n\n\ndef test_multiple_open():\n # Ticket #1039, on Windows: check that files are not left open\n tmpdir = mkdtemp()\n try:\n x = dict(x=np.zeros((2, 2)))\n\n fname = pjoin(tmpdir, \"a.mat\")\n\n # Check that file is not left open\n savemat(fname, x)\n os.unlink(fname)\n savemat(fname, x)\n loadmat(fname)\n os.unlink(fname)\n\n # Check that stream is left open\n f = open(fname, 'wb')\n savemat(f, x)\n f.seek(0)\n f.close()\n\n f = open(fname, 'rb')\n loadmat(f)\n f.seek(0)\n f.close()\n finally:\n shutil.rmtree(tmpdir)\n\n\ndef test_mat73():\n # Check any hdf5 files raise an error\n filenames = glob(\n pjoin(test_data_path, 'testhdf5*.mat'))\n assert_(len(filenames) > 0)\n for filename in filenames:\n fp = open(filename, 'rb')\n assert_raises(NotImplementedError,\n loadmat,\n fp,\n struct_as_record=True)\n fp.close()\n\n\ndef test_warnings():\n # This test is an echo of the previous behavior, which was to raise a\n # warning if the user triggered a search for mat files on the Python system\n # path. We can remove the test in the next version after upcoming (0.13)\n fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n # This should not generate a warning\n mres = loadmat(fname, struct_as_record=True)\n # This neither\n mres = loadmat(fname, struct_as_record=False)\n\n\ndef test_regression_653():\n # Saving a dictionary with only invalid keys used to raise an error. 
Now we\n # save this as an empty struct in matlab space.\n sio = BytesIO()\n savemat(sio, {'d':{1:2}}, format='5')\n back = loadmat(sio)['d']\n # Check we got an empty struct equivalent\n assert_equal(back.shape, (1,1))\n assert_equal(back.dtype, np.dtype(object))\n assert_(back[0,0] is None)\n\n\ndef test_structname_len():\n # Test limit for length of field names in structs\n lim = 31\n fldname = 'a' * lim\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n savemat(BytesIO(), {'longstruct': st1}, format='5')\n fldname = 'a' * (lim+1)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': st1}, format='5')\n\n\ndef test_4_and_long_field_names_incompatible():\n # Long field names option not supported in 4\n my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'my_struct':my_struct}, format='4', long_field_names=True)\n\n\ndef test_long_field_names():\n # Test limit for length of field names in structs\n lim = 63\n fldname = 'a' * lim\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)\n fldname = 'a' * (lim+1)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': st1}, format='5',long_field_names=True)\n\n\ndef test_long_field_names_in_struct():\n # Regression test - long_field_names was erased if you passed a struct\n # within a struct\n lim = 63\n fldname = 'a' * lim\n cell = np.ndarray((1,2),dtype=object)\n st1 = np.zeros((1,1), dtype=[(fldname, object)])\n cell[0,0] = st1\n cell[0,1] = st1\n savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)\n #\n # Check to make sure it fails with long field names off\n #\n assert_raises(ValueError, savemat, BytesIO(),\n {'longstruct': cell}, format='5', long_field_names=False)\n\n\ndef test_cell_with_one_thing_in_it():\n # Regression test - make a cell array that's 1 x 2 and put two\n # strings in it. It works. Make a cell array that's 1 x 1 and put\n # a string in it. 
It should work but, in the old days, it didn't.\n cells = np.ndarray((1,2),dtype=object)\n cells[0,0] = 'Hello'\n cells[0,1] = 'World'\n savemat(BytesIO(), {'x': cells}, format='5')\n\n cells = np.ndarray((1,1),dtype=object)\n cells[0,0] = 'Hello, world'\n savemat(BytesIO(), {'x': cells}, format='5')\n\n\ndef test_writer_properties():\n # Tests getting, setting of properties of matrix writer\n mfw = MatFile5Writer(BytesIO())\n assert_equal(mfw.global_vars, [])\n mfw.global_vars = ['avar']\n assert_equal(mfw.global_vars, ['avar'])\n assert_equal(mfw.unicode_strings, False)\n mfw.unicode_strings = True\n assert_equal(mfw.unicode_strings, True)\n assert_equal(mfw.long_field_names, False)\n mfw.long_field_names = True\n assert_equal(mfw.long_field_names, True)\n\n\ndef test_use_small_element():\n # Test whether we're using small data element or not\n sio = BytesIO()\n wtr = MatFile5Writer(sio)\n # First check size for no sde for name\n arr = np.zeros(10)\n wtr.put_variables({'aaaaa': arr})\n w_sz = len(sio.getvalue())\n # Check small name results in largish difference in size\n sio.truncate(0)\n sio.seek(0)\n wtr.put_variables({'aaaa': arr})\n assert_(w_sz - len(sio.getvalue()) > 4)\n # Whereas increasing name size makes less difference\n sio.truncate(0)\n sio.seek(0)\n wtr.put_variables({'aaaaaa': arr})\n assert_(len(sio.getvalue()) - w_sz < 4)\n\n\ndef test_save_dict():\n # Test that dict can be saved (as recarray), loaded as matstruct\n dict_types = ((dict, False),)\n try:\n from collections import OrderedDict\n except ImportError:\n pass\n else:\n dict_types += ((OrderedDict, True),)\n ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])\n ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])\n for dict_type, is_ordered in dict_types:\n # Initialize with tuples to keep order for OrderedDict\n d = dict_type([('a', 1), ('b', 2)])\n stream = BytesIO()\n savemat(stream, {'dict': d})\n stream.seek(0)\n vals = loadmat(stream)['dict']\n assert_equal(set(vals.dtype.names), set(['a', 'b']))\n if is_ordered: # Input was ordered, output in ab order\n assert_array_equal(vals, ab_exp)\n else: # Not ordered input, either order output\n if vals.dtype.names[0] == 'a':\n assert_array_equal(vals, ab_exp)\n else:\n assert_array_equal(vals, ba_exp)\n\n\ndef test_1d_shape():\n # New 5 behavior is 1D -> row vector\n arr = np.arange(5)\n for format in ('4', '5'):\n # Column is the default\n stream = BytesIO()\n savemat(stream, {'oned': arr}, format=format)\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (1, 5))\n # can be explicitly 'column' for oned_as\n stream = BytesIO()\n savemat(stream, {'oned':arr},\n format=format,\n oned_as='column')\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (5,1))\n # but different from 'row'\n stream = BytesIO()\n savemat(stream, {'oned':arr},\n format=format,\n oned_as='row')\n vals = loadmat(stream)\n assert_equal(vals['oned'].shape, (1,5))\n\n\ndef test_compression():\n arr = np.zeros(100).reshape((5,20))\n arr[2,10] = 1\n stream = BytesIO()\n savemat(stream, {'arr':arr})\n raw_len = len(stream.getvalue())\n vals = loadmat(stream)\n assert_array_equal(vals['arr'], arr)\n stream = BytesIO()\n savemat(stream, {'arr':arr}, do_compression=True)\n compressed_len = len(stream.getvalue())\n vals = loadmat(stream)\n assert_array_equal(vals['arr'], arr)\n assert_(raw_len > compressed_len)\n # Concatenate, test later\n arr2 = arr.copy()\n arr2[0,0] = 1\n stream = BytesIO()\n savemat(stream, {'arr':arr, 'arr2':arr2}, 
do_compression=False)\n vals = loadmat(stream)\n assert_array_equal(vals['arr2'], arr2)\n stream = BytesIO()\n savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)\n vals = loadmat(stream)\n assert_array_equal(vals['arr2'], arr2)\n\n\ndef test_single_object():\n stream = BytesIO()\n savemat(stream, {'A':np.array(1, dtype=object)})\n\n\ndef test_skip_variable():\n # Test skipping over the first of two variables in a MAT file\n # using mat_reader_factory and put_variables to read them in.\n #\n # This is a regression test of a problem that's caused by\n # using the compressed file reader seek instead of the raw file\n # I/O seek when skipping over a compressed chunk.\n #\n # The problem arises when the chunk is large: this file has\n # a 256x256 array of random (uncompressible) doubles.\n #\n filename = pjoin(test_data_path,'test_skip_variable.mat')\n #\n # Prove that it loads with loadmat\n #\n d = loadmat(filename, struct_as_record=True)\n assert_('first' in d)\n assert_('second' in d)\n #\n # Make the factory\n #\n factory, file_opened = mat_reader_factory(filename, struct_as_record=True)\n #\n # This is where the factory breaks with an error in MatMatrixGetter.to_next\n #\n d = factory.get_variables('second')\n assert_('second' in d)\n factory.mat_stream.close()\n\n\ndef test_empty_struct():\n # ticket 885\n filename = pjoin(test_data_path,'test_empty_struct.mat')\n # before ticket fix, this would crash with ValueError, empty data\n # type\n d = loadmat(filename, struct_as_record=True)\n a = d['a']\n assert_equal(a.shape, (1,1))\n assert_equal(a.dtype, np.dtype(object))\n assert_(a[0,0] is None)\n stream = BytesIO()\n arr = np.array((), dtype='U')\n # before ticket fix, this used to give data type not understood\n savemat(stream, {'arr':arr})\n d = loadmat(stream)\n a2 = d['arr']\n assert_array_equal(a2, arr)\n\n\ndef test_save_empty_dict():\n # saving empty dict also gives empty struct\n stream = BytesIO()\n savemat(stream, {'arr': {}})\n d = loadmat(stream)\n a = d['arr']\n assert_equal(a.shape, (1,1))\n assert_equal(a.dtype, np.dtype(object))\n assert_(a[0,0] is None)\n\n\ndef assert_any_equal(output, alternatives):\n \"\"\" Assert `output` is equal to at least one element in `alternatives`\n \"\"\"\n one_equal = False\n for expected in alternatives:\n if np.all(output == expected):\n one_equal = True\n break\n assert_(one_equal)\n\n\ndef test_to_writeable():\n # Test to_writeable function\n res = to_writeable(np.array([1])) # pass through ndarrays\n assert_equal(res.shape, (1,))\n assert_array_equal(res, 1)\n # Dict fields can be written in any order\n expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])\n expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])\n alternatives = (expected1, expected2)\n assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)\n # Fields with underscores discarded\n assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)\n # Not-string fields discarded\n assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)\n # String fields that are valid Python identifiers discarded\n assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)\n # Object with field names is equivalent\n\n class klass(object):\n pass\n\n c = klass\n c.a = 1\n c.b = 2\n assert_any_equal(to_writeable(c), alternatives)\n # empty list and tuple go to empty array\n res = to_writeable([])\n assert_equal(res.shape, (0,))\n assert_equal(res.dtype.type, np.float64)\n res = to_writeable(())\n assert_equal(res.shape, (0,))\n 
assert_equal(res.dtype.type, np.float64)\n # None -> None\n assert_(to_writeable(None) is None)\n # String to strings\n assert_equal(to_writeable('a string').dtype.type, np.str_)\n # Scalars to numpy to numpy scalars\n res = to_writeable(1)\n assert_equal(res.shape, ())\n assert_equal(res.dtype.type, np.array(1).dtype.type)\n assert_array_equal(res, 1)\n # Empty dict returns EmptyStructMarker\n assert_(to_writeable({}) is EmptyStructMarker)\n # Object does not have (even empty) __dict__\n assert_(to_writeable(object()) is None)\n # Custom object does have empty __dict__, returns EmptyStructMarker\n\n class C(object):\n pass\n\n assert_(to_writeable(c()) is EmptyStructMarker)\n # dict keys with legal characters are convertible\n res = to_writeable({'a': 1})['a']\n assert_equal(res.shape, (1,))\n assert_equal(res.dtype.type, np.object_)\n # Only fields with illegal characters, falls back to EmptyStruct\n assert_(to_writeable({'1':1}) is EmptyStructMarker)\n assert_(to_writeable({'_a':1}) is EmptyStructMarker)\n # Unless there are valid fields, in which case structured array\n assert_equal(to_writeable({'1':1, 'f': 2}),\n np.array([(2,)], dtype=[('f', '|O8')]))\n\n\ndef test_recarray():\n # check roundtrip of structured array\n dt = [('f1', 'f8'),\n ('f2', 'S10')]\n arr = np.zeros((2,), dtype=dt)\n arr[0]['f1'] = 0.5\n arr[0]['f2'] = 'python'\n arr[1]['f1'] = 99\n arr[1]['f2'] = 'not perl'\n stream = BytesIO()\n savemat(stream, {'arr': arr})\n d = loadmat(stream, struct_as_record=False)\n a20 = d['arr'][0,0]\n assert_equal(a20.f1, 0.5)\n assert_equal(a20.f2, 'python')\n d = loadmat(stream, struct_as_record=True)\n a20 = d['arr'][0,0]\n assert_equal(a20['f1'], 0.5)\n assert_equal(a20['f2'], 'python')\n # structs always come back as object types\n assert_equal(a20.dtype, np.dtype([('f1', 'O'),\n ('f2', 'O')]))\n a21 = d['arr'].flat[1]\n assert_equal(a21['f1'], 99)\n assert_equal(a21['f2'], 'not perl')\n\n\ndef test_save_object():\n class C(object):\n pass\n c = C()\n c.field1 = 1\n c.field2 = 'a string'\n stream = BytesIO()\n savemat(stream, {'c': c})\n d = loadmat(stream, struct_as_record=False)\n c2 = d['c'][0,0]\n assert_equal(c2.field1, 1)\n assert_equal(c2.field2, 'a string')\n d = loadmat(stream, struct_as_record=True)\n c2 = d['c'][0,0]\n assert_equal(c2['field1'], 1)\n assert_equal(c2['field2'], 'a string')\n\n\ndef test_read_opts():\n # tests if read is seeing option sets, at initialization and after\n # initialization\n arr = np.arange(6).reshape(1,6)\n stream = BytesIO()\n savemat(stream, {'a': arr})\n rdr = MatFile5Reader(stream)\n back_dict = rdr.get_variables()\n rarr = back_dict['a']\n assert_array_equal(rarr, arr)\n rdr = MatFile5Reader(stream, squeeze_me=True)\n assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))\n rdr.squeeze_me = False\n assert_array_equal(rarr, arr)\n rdr = MatFile5Reader(stream, byte_order=boc.native_code)\n assert_array_equal(rdr.get_variables()['a'], arr)\n # inverted byte code leads to error on read because of swapped\n # header etc\n rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)\n assert_raises(Exception, rdr.get_variables)\n rdr.byte_order = boc.native_code\n assert_array_equal(rdr.get_variables()['a'], arr)\n arr = np.array(['a string'])\n stream.truncate(0)\n stream.seek(0)\n savemat(stream, {'a': arr})\n rdr = MatFile5Reader(stream)\n assert_array_equal(rdr.get_variables()['a'], arr)\n rdr = MatFile5Reader(stream, chars_as_strings=False)\n carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))\n 
assert_array_equal(rdr.get_variables()['a'], carr)\n rdr.chars_as_strings = True\n assert_array_equal(rdr.get_variables()['a'], arr)\n\n\ndef test_empty_string():\n # make sure reading empty string does not raise error\n estring_fname = pjoin(test_data_path, 'single_empty_string.mat')\n fp = open(estring_fname, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n # empty string round trip. Matlab cannot distiguish\n # between a string array that is empty, and a string array\n # containing a single empty string, because it stores strings as\n # arrays of char. There is no way of having an array of char that\n # is not empty, but contains an empty string.\n stream = BytesIO()\n savemat(stream, {'a': np.array([''])})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n stream.truncate(0)\n stream.seek(0)\n savemat(stream, {'a': np.array([], dtype='U1')})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['a'], np.array([], dtype='U1'))\n stream.close()\n\n\ndef test_corrupted_data():\n import zlib\n for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),\n (zlib.error, 'corrupted_zlib_checksum.mat')]:\n with open(pjoin(test_data_path, fname), 'rb') as fp:\n rdr = MatFile5Reader(fp)\n assert_raises(exc, rdr.get_variables)\n\n\ndef test_corrupted_data_check_can_be_disabled():\n with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:\n rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)\n rdr.get_variables()\n\n\ndef test_read_both_endian():\n # make sure big- and little- endian data is read correctly\n for fname in ('big_endian.mat', 'little_endian.mat'):\n fp = open(pjoin(test_data_path, fname), 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_array_equal(d['strings'],\n np.array([['hello'],\n ['world']], dtype=object))\n assert_array_equal(d['floats'],\n np.array([[2., 3.],\n [3., 4.]], dtype=np.float32))\n\n\ndef test_write_opposite_endian():\n # We don't support writing opposite endian .mat files, but we need to behave\n # correctly if the user supplies an other-endian numpy array to write out\n float_arr = np.array([[2., 3.],\n [3., 4.]])\n int_arr = np.arange(6).reshape((2, 3))\n uni_arr = np.array(['hello', 'world'], dtype='U')\n stream = BytesIO()\n savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),\n 'ints': int_arr.byteswap().newbyteorder(),\n 'uni_arr': uni_arr.byteswap().newbyteorder()})\n rdr = MatFile5Reader(stream)\n d = rdr.get_variables()\n assert_array_equal(d['floats'], float_arr)\n assert_array_equal(d['ints'], int_arr)\n assert_array_equal(d['uni_arr'], uni_arr)\n stream.close()\n\n\ndef test_logical_array():\n # The roundtrip test doesn't verify that we load the data up with the\n # correct (bool) dtype\n with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:\n rdr = MatFile5Reader(fobj, mat_dtype=True)\n d = rdr.get_variables()\n x = np.array([[True], [False]], dtype=np.bool_)\n assert_array_equal(d['testbools'], x)\n assert_equal(d['testbools'].dtype, x.dtype)\n\n\ndef test_logical_out_type():\n # Confirm that bool type written as uint8, uint8 class\n # See gh-4022\n stream = BytesIO()\n barr = np.array([False, True, False])\n savemat(stream, {'barray': barr})\n stream.seek(0)\n reader = MatFile5Reader(stream)\n reader.initialize_read()\n reader.read_file_header()\n hdr, _ = reader.read_var_header()\n 
assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)\n assert_equal(hdr.is_logical, True)\n var = reader.read_var_array(hdr, False)\n assert_equal(var.dtype.type, np.uint8)\n\n\ndef test_mat4_3d():\n # test behavior when writing 3D arrays to matlab 4 files\n stream = BytesIO()\n arr = np.arange(24).reshape((2,3,4))\n assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')\n\n\ndef test_func_read():\n func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')\n fp = open(func_eg, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_(isinstance(d['testfunc'], MatlabFunction))\n stream = BytesIO()\n wtr = MatFile5Writer(stream)\n assert_raises(MatWriteError, wtr.put_variables, d)\n\n\ndef test_mat_dtype():\n double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')\n fp = open(double_eg, 'rb')\n rdr = MatFile5Reader(fp, mat_dtype=False)\n d = rdr.get_variables()\n fp.close()\n assert_equal(d['testmatrix'].dtype.kind, 'u')\n\n fp = open(double_eg, 'rb')\n rdr = MatFile5Reader(fp, mat_dtype=True)\n d = rdr.get_variables()\n fp.close()\n assert_equal(d['testmatrix'].dtype.kind, 'f')\n\n\ndef test_sparse_in_struct():\n # reproduces bug found by DC where Cython code was insisting on\n # ndarray return type, but getting sparse matrix\n st = {'sparsefield': SP.coo_matrix(np.eye(4))}\n stream = BytesIO()\n savemat(stream, {'a':st})\n d = loadmat(stream, struct_as_record=True)\n assert_array_equal(d['a'][0,0]['sparsefield'].todense(), np.eye(4))\n\n\ndef test_mat_struct_squeeze():\n stream = BytesIO()\n in_d = {'st':{'one':1, 'two':2}}\n savemat(stream, in_d)\n # no error without squeeze\n out_d = loadmat(stream, struct_as_record=False)\n # previous error was with squeeze, with mat_struct\n out_d = loadmat(stream,\n struct_as_record=False,\n squeeze_me=True,\n )\n\n\ndef test_scalar_squeeze():\n stream = BytesIO()\n in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}\n savemat(stream, in_d)\n out_d = loadmat(stream, squeeze_me=True)\n assert_(isinstance(out_d['scalar'], float))\n assert_(isinstance(out_d['string'], string_types))\n assert_(isinstance(out_d['st'], np.ndarray))\n\n\ndef test_str_round():\n # from report by Angus McMorland on mailing list 3 May 2010\n stream = BytesIO()\n in_arr = np.array(['Hello', 'Foob'])\n out_arr = np.array(['Hello', 'Foob '])\n savemat(stream, dict(a=in_arr))\n res = loadmat(stream)\n # resulted in ['HloolFoa', 'elWrdobr']\n assert_array_equal(res['a'], out_arr)\n stream.truncate(0)\n stream.seek(0)\n # Make Fortran ordered version of string\n in_str = in_arr.tostring(order='F')\n in_from_str = np.ndarray(shape=a.shape,\n dtype=in_arr.dtype,\n order='F',\n buffer=in_str)\n savemat(stream, dict(a=in_from_str))\n assert_array_equal(res['a'], out_arr)\n # unicode save did lead to buffer too small error\n stream.truncate(0)\n stream.seek(0)\n in_arr_u = in_arr.astype('U')\n out_arr_u = out_arr.astype('U')\n savemat(stream, {'a': in_arr_u})\n res = loadmat(stream)\n assert_array_equal(res['a'], out_arr_u)\n\n\ndef test_fieldnames():\n # Check that field names are as expected\n stream = BytesIO()\n savemat(stream, {'a': {'a':1, 'b':2}})\n res = loadmat(stream)\n field_names = res['a'].dtype.names\n assert_equal(set(field_names), set(('a', 'b')))\n\n\ndef test_loadmat_varnames():\n # Test that we can get just one variable from a mat file using loadmat\n mat5_sys_names = ['__globals__',\n '__header__',\n '__version__']\n for eg_file, sys_v_names in (\n (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(\n 
test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):\n vars = loadmat(eg_file)\n assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))\n vars = loadmat(eg_file, variable_names='a')\n assert_equal(set(vars.keys()), set(['a'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=['a'])\n assert_equal(set(vars.keys()), set(['a'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=['theta'])\n assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=('theta',))\n assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))\n vars = loadmat(eg_file, variable_names=[])\n assert_equal(set(vars.keys()), set(sys_v_names))\n vnames = ['theta']\n vars = loadmat(eg_file, variable_names=vnames)\n assert_equal(vnames, ['theta'])\n\n\ndef test_round_types():\n # Check that saving, loading preserves dtype in most cases\n arr = np.arange(10)\n stream = BytesIO()\n for dts in ('f8','f4','i8','i4','i2','i1',\n 'u8','u4','u2','u1','c16','c8'):\n stream.truncate(0)\n stream.seek(0) # needed for BytesIO in python 3\n savemat(stream, {'arr': arr.astype(dts)})\n vars = loadmat(stream)\n assert_equal(np.dtype(dts), vars['arr'].dtype)\n\n\ndef test_varmats_from_mat():\n # Make a mat file with several variables, write it, read it back\n names_vars = (('arr', mlarr(np.arange(10))),\n ('mystr', mlarr('a string')),\n ('mynum', mlarr(10)))\n\n # Dict like thing to give variables in defined order\n class C(object):\n def items(self):\n return names_vars\n stream = BytesIO()\n savemat(stream, C())\n varmats = varmats_from_mat(stream)\n assert_equal(len(varmats), 3)\n for i in range(3):\n name, var_stream = varmats[i]\n exp_name, exp_res = names_vars[i]\n assert_equal(name, exp_name)\n res = loadmat(var_stream)\n assert_array_equal(res[name], exp_res)\n\n\ndef test_one_by_zero():\n # Test 1x0 chars get read correctly\n func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')\n fp = open(func_eg, 'rb')\n rdr = MatFile5Reader(fp)\n d = rdr.get_variables()\n fp.close()\n assert_equal(d['var'].shape, (0,))\n\n\ndef test_load_mat4_le():\n # We were getting byte order wrong when reading little-endian floa64 dense\n # matrices on big-endian platforms\n mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')\n vars = loadmat(mat4_fname)\n assert_array_equal(vars['a'], [[0.1, 1.2]])\n\n\ndef test_unicode_mat4():\n # Mat4 should save unicode as latin1\n bio = BytesIO()\n var = {'second_cat': u('Schrödinger')}\n savemat(bio, var, format='4')\n var_back = loadmat(bio)\n assert_equal(var_back['second_cat'], var['second_cat'])\n\n\ndef test_logical_sparse():\n # Test we can read logical sparse stored in mat file as bytes.\n # See https://github.com/scipy/scipy/issues/3539.\n # In some files saved by MATLAB, the sparse data elements (Real Part\n # Subelement in MATLAB speak) are stored with apparent type double\n # (miDOUBLE) but are in fact single bytes.\n filename = pjoin(test_data_path,'logical_sparse.mat')\n # Before fix, this would crash with:\n # ValueError: indices and data should have the same size\n d = loadmat(filename, struct_as_record=True)\n log_sp = d['sp_log_5_4']\n assert_(isinstance(log_sp, SP.csc_matrix))\n assert_equal(log_sp.dtype.type, np.bool_)\n assert_array_equal(log_sp.toarray(),\n [[True, True, True, False],\n [False, False, True, False],\n [False, False, True, False],\n [False, False, False, False],\n [False, False, False, False]])\n\n\ndef test_empty_sparse():\n # Can we read empty sparse matrices?\n sio = BytesIO()\n import 
scipy.sparse\n empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])\n savemat(sio, dict(x=empty_sparse))\n sio.seek(0)\n res = loadmat(sio)\n assert_array_equal(res['x'].shape, empty_sparse.shape)\n assert_array_equal(res['x'].todense(), 0)\n # Do empty sparse matrices get written with max nnz 1?\n # See https://github.com/scipy/scipy/issues/4208\n sio.seek(0)\n reader = MatFile5Reader(sio)\n reader.initialize_read()\n reader.read_file_header()\n hdr, _ = reader.read_var_header()\n assert_equal(hdr.nzmax, 1)\n\n\ndef test_empty_mat_error():\n # Test we get a specific warning for an empty mat file\n sio = BytesIO()\n assert_raises(MatReadError, loadmat, sio)\n\n\ndef test_miuint32_compromise():\n # Reader should accept miUINT32 for miINT32, but check signs\n # mat file with miUINT32 for miINT32, but OK values\n filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')\n res = loadmat(filename)\n assert_equal(res['an_array'], np.arange(10)[None, :])\n # mat file with miUINT32 for miINT32, with negative value\n filename = pjoin(test_data_path, 'bad_miuint32.mat')\n with suppress_warnings() as sup:\n sup.filter(message=\"unclosed file\") # Py3k ResourceWarning\n assert_raises(ValueError, loadmat, filename)\n\n\ndef test_miutf8_for_miint8_compromise():\n # Check reader accepts ascii as miUTF8 for array names\n filename = pjoin(test_data_path, 'miutf8_array_name.mat')\n res = loadmat(filename)\n assert_equal(res['array_name'], [[1]])\n # mat file with non-ascii utf8 name raises error\n filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')\n with suppress_warnings() as sup:\n sup.filter(message=\"unclosed file\") # Py3k ResourceWarning\n assert_raises(ValueError, loadmat, filename)\n\n\ndef test_bad_utf8():\n # Check that reader reads bad UTF with 'replace' option\n filename = pjoin(test_data_path,'broken_utf8.mat')\n res = loadmat(filename)\n assert_equal(res['bad_string'],\n b'\\x80 am broken'.decode('utf8', 'replace'))\n\n",
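"# --- Hedged sketch, not part of the scipy.io test module above. ---\n# The minimal savemat/loadmat round trip through an in-memory stream that\n# most of those tests build on, including the MATLAB convention (checked by\n# test_1d_shape above) that a 1-D array comes back as a 2-D row vector.\nimport numpy as np\nfrom io import BytesIO\nfrom scipy.io import savemat, loadmat\n\nstream = BytesIO()\nsavemat(stream, {'arr': np.arange(5.0)})\nstream.seek(0)\nback = loadmat(stream)\nassert back['arr'].shape == (1, 5)\nassert np.array_equal(back['arr'], np.arange(5.0).reshape(1, 5))\n",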
"from __future__ import absolute_import, division, print_function\n\nimport math\nimport os\nimport sys\nimport re\n\nimport numpy as np\n\nfrom numba import unittest_support as unittest\nfrom numba import njit\nfrom numba.compiler import compile_isolated, Flags, types\nfrom numba.runtime import rtsys\nfrom numba.runtime import nrtopt\nfrom .support import MemoryLeakMixin, TestCase\n\nenable_nrt_flags = Flags()\nenable_nrt_flags.set(\"nrt\")\n\n\nclass Dummy(object):\n alive = 0\n\n def __init__(self):\n type(self).alive += 1\n\n def __del__(self):\n type(self).alive -= 1\n\n\nclass TestNrtMemInfo(unittest.TestCase):\n \"\"\"\n Unitest for core MemInfo functionality\n \"\"\"\n\n def setUp(self):\n # Reset the Dummy class\n Dummy.alive = 0\n\n def test_meminfo_refct_1(self):\n d = Dummy()\n self.assertEqual(Dummy.alive, 1)\n addr = 0xdeadcafe # some made up location\n\n mi = rtsys.meminfo_new(addr, d)\n self.assertEqual(mi.refcount, 1)\n del d\n self.assertEqual(Dummy.alive, 1)\n mi.acquire()\n self.assertEqual(mi.refcount, 2)\n self.assertEqual(Dummy.alive, 1)\n mi.release()\n self.assertEqual(mi.refcount, 1)\n del mi\n self.assertEqual(Dummy.alive, 0)\n\n def test_meminfo_refct_2(self):\n d = Dummy()\n self.assertEqual(Dummy.alive, 1)\n addr = 0xdeadcafe # some made up location\n\n mi = rtsys.meminfo_new(addr, d)\n self.assertEqual(mi.refcount, 1)\n del d\n self.assertEqual(Dummy.alive, 1)\n for ct in range(100):\n mi.acquire()\n self.assertEqual(mi.refcount, 1 + 100)\n self.assertEqual(Dummy.alive, 1)\n for _ in range(100):\n mi.release()\n self.assertEqual(mi.refcount, 1)\n del mi\n self.assertEqual(Dummy.alive, 0)\n\n @unittest.skipIf(sys.version_info < (3,), \"memoryview not supported\")\n def test_fake_memoryview(self):\n d = Dummy()\n self.assertEqual(Dummy.alive, 1)\n addr = 0xdeadcafe # some made up location\n\n mi = rtsys.meminfo_new(addr, d)\n self.assertEqual(mi.refcount, 1)\n mview = memoryview(mi)\n self.assertEqual(mi.refcount, 1)\n self.assertEqual(addr, mi.data)\n self.assertFalse(mview.readonly)\n self.assertIs(mi, mview.obj)\n self.assertTrue(mview.c_contiguous)\n self.assertEqual(mview.itemsize, 1)\n self.assertEqual(mview.ndim, 1)\n del d\n del mi\n\n self.assertEqual(Dummy.alive, 1)\n del mview\n self.assertEqual(Dummy.alive, 0)\n\n @unittest.skipIf(sys.version_info < (3,), \"memoryview not supported\")\n def test_memoryview(self):\n from ctypes import c_uint32, c_void_p, POINTER, cast\n\n dtype = np.dtype(np.uint32)\n bytesize = dtype.itemsize * 10\n mi = rtsys.meminfo_alloc(bytesize, safe=True)\n addr = mi.data\n c_arr = cast(c_void_p(mi.data), POINTER(c_uint32 * 10))\n # Check 0xCB-filling\n for i in range(10):\n self.assertEqual(c_arr.contents[i], 0xcbcbcbcb)\n\n # Init array with ctypes\n for i in range(10):\n c_arr.contents[i] = i + 1\n mview = memoryview(mi)\n self.assertEqual(mview.nbytes, bytesize)\n self.assertFalse(mview.readonly)\n self.assertIs(mi, mview.obj)\n self.assertTrue(mview.c_contiguous)\n self.assertEqual(mview.itemsize, 1)\n self.assertEqual(mview.ndim, 1)\n del mi\n arr = np.ndarray(dtype=dtype, shape=mview.nbytes // dtype.itemsize,\n buffer=mview)\n del mview\n # Modify array with NumPy\n np.testing.assert_equal(np.arange(arr.size) + 1, arr)\n\n arr += 1\n\n # Check value reflected in ctypes\n for i in range(10):\n self.assertEqual(c_arr.contents[i], i + 2)\n\n self.assertEqual(arr.ctypes.data, addr)\n del arr\n # At this point the memory is zero filled\n # We can't check this deterministically because the memory could be\n # consumed by 
another thread.\n\n def test_buffer(self):\n from ctypes import c_uint32, c_void_p, POINTER, cast\n\n dtype = np.dtype(np.uint32)\n bytesize = dtype.itemsize * 10\n mi = rtsys.meminfo_alloc(bytesize, safe=True)\n self.assertEqual(mi.refcount, 1)\n addr = mi.data\n c_arr = cast(c_void_p(addr), POINTER(c_uint32 * 10))\n # Check 0xCB-filling\n for i in range(10):\n self.assertEqual(c_arr.contents[i], 0xcbcbcbcb)\n\n # Init array with ctypes\n for i in range(10):\n c_arr.contents[i] = i + 1\n\n arr = np.ndarray(dtype=dtype, shape=bytesize // dtype.itemsize,\n buffer=mi)\n self.assertEqual(mi.refcount, 1)\n del mi\n # Modify array with NumPy\n np.testing.assert_equal(np.arange(arr.size) + 1, arr)\n\n arr += 1\n\n # Check value reflected in ctypes\n for i in range(10):\n self.assertEqual(c_arr.contents[i], i + 2)\n\n self.assertEqual(arr.ctypes.data, addr)\n del arr\n # At this point the memory is zero filled\n # We can't check this deterministically because the memory could be\n # consumed by another thread.\n\n\[email protected](sys.version_info >= (3, 4),\n \"need Python 3.4+ for the tracemalloc module\")\nclass TestTracemalloc(unittest.TestCase):\n \"\"\"\n Test NRT-allocated memory can be tracked by tracemalloc.\n \"\"\"\n\n def measure_memory_diff(self, func):\n import tracemalloc\n tracemalloc.start()\n try:\n before = tracemalloc.take_snapshot()\n # Keep the result and only delete it after taking a snapshot\n res = func()\n after = tracemalloc.take_snapshot()\n del res\n return after.compare_to(before, 'lineno')\n finally:\n tracemalloc.stop()\n\n def test_snapshot(self):\n N = 1000000\n dtype = np.int8\n\n @njit\n def alloc_nrt_memory():\n \"\"\"\n Allocate and return a large array.\n \"\"\"\n return np.empty(N, dtype)\n\n def keep_memory():\n return alloc_nrt_memory()\n\n def release_memory():\n alloc_nrt_memory()\n\n alloc_lineno = keep_memory.__code__.co_firstlineno + 1\n\n # Warmup JIT\n alloc_nrt_memory()\n\n # The large NRT-allocated array should appear topmost in the diff\n diff = self.measure_memory_diff(keep_memory)\n stat = diff[0]\n # There is a slight overhead, so the allocated size won't exactly be N\n self.assertGreaterEqual(stat.size, N)\n self.assertLess(stat.size, N * 1.01)\n frame = stat.traceback[0]\n self.assertEqual(os.path.basename(frame.filename), \"test_nrt.py\")\n self.assertEqual(frame.lineno, alloc_lineno)\n\n # If NRT memory is released before taking a snapshot, it shouldn't\n # appear.\n diff = self.measure_memory_diff(release_memory)\n stat = diff[0]\n # Something else appears, but nothing the magnitude of N\n self.assertLess(stat.size, N * 0.01)\n\n\nclass TestNRTIssue(MemoryLeakMixin, TestCase):\n def test_issue_with_refct_op_pruning(self):\n \"\"\"\n GitHub Issue #1244 https://github.com/numba/numba/issues/1244\n \"\"\"\n @njit\n def calculate_2D_vector_mag(vector):\n x, y = vector\n\n return math.sqrt(x ** 2 + y ** 2)\n\n @njit\n def normalize_2D_vector(vector):\n normalized_vector = np.empty(2, dtype=np.float64)\n\n mag = calculate_2D_vector_mag(vector)\n x, y = vector\n\n normalized_vector[0] = x / mag\n normalized_vector[1] = y / mag\n\n return normalized_vector\n\n @njit\n def normalize_vectors(num_vectors, vectors):\n normalized_vectors = np.empty((num_vectors, 2), dtype=np.float64)\n\n for i in range(num_vectors):\n vector = vectors[i]\n\n normalized_vector = normalize_2D_vector(vector)\n\n normalized_vectors[i, 0] = normalized_vector[0]\n normalized_vectors[i, 1] = normalized_vector[1]\n\n return normalized_vectors\n\n num_vectors = 10\n 
test_vectors = np.random.random((num_vectors, 2))\n got = normalize_vectors(num_vectors, test_vectors)\n expected = normalize_vectors.py_func(num_vectors, test_vectors)\n\n np.testing.assert_almost_equal(expected, got)\n\n def test_incref_after_cast(self):\n # Issue #1427: when casting a value before returning it, the\n # cast result should be incref'ed, not the original value.\n def f():\n return 0.0, np.zeros(1, dtype=np.int32)\n\n # Note the return type isn't the same as the tuple type above:\n # the first element is a complex rather than a float.\n cres = compile_isolated(f, (),\n types.Tuple((types.complex128,\n types.Array(types.int32, 1, 'C')\n ))\n )\n z, arr = cres.entry_point()\n self.assertPreciseEqual(z, 0j)\n self.assertPreciseEqual(arr, np.zeros(1, dtype=np.int32))\n\n def test_refct_pruning_issue_1511(self):\n @njit\n def f():\n a = np.ones(10, dtype=np.float64)\n b = np.ones(10, dtype=np.float64)\n return a, b[:]\n\n a, b = f()\n np.testing.assert_equal(a, b)\n np.testing.assert_equal(a, np.ones(10, dtype=np.float64))\n\n def test_refct_pruning_issue_1526(self):\n @njit\n def udt(image, x, y):\n next_loc = np.where(image == 1)\n\n if len(next_loc[0]) == 0:\n y_offset = 1\n x_offset = 1\n else:\n y_offset = next_loc[0][0]\n x_offset = next_loc[1][0]\n\n next_loc_x = (x - 1) + x_offset\n next_loc_y = (y - 1) + y_offset\n\n return next_loc_x, next_loc_y\n\n a = np.array([[1, 0, 1, 0, 1, 0, 0, 1, 0, 0]])\n expect = udt.py_func(a, 1, 6)\n got = udt(a, 1, 6)\n\n self.assertEqual(expect, got)\n\n\nclass TestRefCtPruning(unittest.TestCase):\n\n sample_llvm_ir = '''\ndefine i32 @\"MyFunction\"(i8** noalias nocapture %retptr, { i8*, i32 }** noalias nocapture %excinfo, i8* noalias nocapture readnone %env, double %arg.vt.0, double %arg.vt.1, double %arg.vt.2, double %arg.vt.3, double %arg.bounds.0, double %arg.bounds.1, double %arg.bounds.2, double %arg.bounds.3, i8* %arg.xs.0, i8* nocapture readnone %arg.xs.1, i64 %arg.xs.2, i64 %arg.xs.3, double* nocapture readonly %arg.xs.4, i64 %arg.xs.5.0, i64 %arg.xs.6.0, i8* %arg.ys.0, i8* nocapture readnone %arg.ys.1, i64 %arg.ys.2, i64 %arg.ys.3, double* nocapture readonly %arg.ys.4, i64 %arg.ys.5.0, i64 %arg.ys.6.0, i8* %arg.aggs_and_cols.0.0, i8* nocapture readnone %arg.aggs_and_cols.0.1, i64 %arg.aggs_and_cols.0.2, i64 %arg.aggs_and_cols.0.3, i32* nocapture %arg.aggs_and_cols.0.4, i64 %arg.aggs_and_cols.0.5.0, i64 %arg.aggs_and_cols.0.5.1, i64 %arg.aggs_and_cols.0.6.0, i64 %arg.aggs_and_cols.0.6.1) local_unnamed_addr {\nentry:\ntail call void @NRT_incref(i8* %arg.xs.0)\ntail call void @NRT_incref(i8* %arg.ys.0)\ntail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0)\n%.251 = icmp sgt i64 %arg.xs.5.0, 0\nbr i1 %.251, label %B42.preheader, label %B160\n\nB42.preheader: ; preds = %entry\n%0 = add i64 %arg.xs.5.0, 1\nbr label %B42\n\nB42: ; preds = %B40.backedge, %B42.preheader\n%lsr.iv3 = phi i64 [ %lsr.iv.next, %B40.backedge ], [ %0, %B42.preheader ]\n%lsr.iv1 = phi double* [ %scevgep2, %B40.backedge ], [ %arg.xs.4, %B42.preheader ]\n%lsr.iv = phi double* [ %scevgep, %B40.backedge ], [ %arg.ys.4, %B42.preheader ]\n%.381 = load double, double* %lsr.iv1, align 8\n%.420 = load double, double* %lsr.iv, align 8\n%.458 = fcmp ole double %.381, %arg.bounds.1\n%not..432 = fcmp oge double %.381, %arg.bounds.0\n%\"$phi82.1.1\" = and i1 %.458, %not..432\nbr i1 %\"$phi82.1.1\", label %B84, label %B40.backedge\n\nB84: ; preds = %B42\n%.513 = fcmp ole double %.420, %arg.bounds.3\n%not..487 = fcmp oge double %.420, %arg.bounds.2\n%\"$phi106.1.1\" = and i1 
%.513, %not..487\nbr i1 %\"$phi106.1.1\", label %B108.endif.endif.endif, label %B40.backedge\n\nB160: ; preds = %B40.backedge, %entry\ntail call void @NRT_decref(i8* %arg.ys.0)\ntail call void @NRT_decref(i8* %arg.xs.0)\ntail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0)\nstore i8* null, i8** %retptr, align 8\nret i32 0\n\nB108.endif.endif.endif: ; preds = %B84\n%.575 = fmul double %.381, %arg.vt.0\n%.583 = fadd double %.575, %arg.vt.1\n%.590 = fptosi double %.583 to i64\n%.630 = fmul double %.420, %arg.vt.2\n%.638 = fadd double %.630, %arg.vt.3\n%.645 = fptosi double %.638 to i64\ntail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0) ; GONE 1\ntail call void @NRT_decref(i8* null) ; GONE 2\ntail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 3\n%.62.i.i = icmp slt i64 %.645, 0\n%.63.i.i = select i1 %.62.i.i, i64 %arg.aggs_and_cols.0.5.0, i64 0\n%.64.i.i = add i64 %.63.i.i, %.645\n%.65.i.i = icmp slt i64 %.590, 0\n%.66.i.i = select i1 %.65.i.i, i64 %arg.aggs_and_cols.0.5.1, i64 0\n%.67.i.i = add i64 %.66.i.i, %.590\n%.84.i.i = mul i64 %.64.i.i, %arg.aggs_and_cols.0.5.1\n%.87.i.i = add i64 %.67.i.i, %.84.i.i\n%.88.i.i = getelementptr i32, i32* %arg.aggs_and_cols.0.4, i64 %.87.i.i\n%.89.i.i = load i32, i32* %.88.i.i, align 4, !noalias !3\n%.99.i.i = add i32 %.89.i.i, 1\nstore i32 %.99.i.i, i32* %.88.i.i, align 4, !noalias !3\ntail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 4\ntail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0) ; GONE 5\nbr label %B40.backedge\n\nB40.backedge: ; preds = %B108.endif.endif.endif, %B84, %B42\n%scevgep = getelementptr double, double* %lsr.iv, i64 1\n%scevgep2 = getelementptr double, double* %lsr.iv1, i64 1\n%lsr.iv.next = add i64 %lsr.iv3, -1\n%.294 = icmp sgt i64 %lsr.iv.next, 1\nbr i1 %.294, label %B42, label %B160\n}\n '''\n\n def test_refct_pruning_op_recognize(self):\n input_ir = self.sample_llvm_ir\n input_lines = list(input_ir.splitlines())\n before_increfs = [ln for ln in input_lines if 'NRT_incref' in ln]\n before_decrefs = [ln for ln in input_lines if 'NRT_decref' in ln]\n\n # prune\n output_ir = nrtopt._remove_redundant_nrt_refct(input_ir)\n output_lines = list(output_ir.splitlines())\n after_increfs = [ln for ln in output_lines if 'NRT_incref' in ln]\n after_decrefs = [ln for ln in output_lines if 'NRT_decref' in ln]\n\n # check\n self.assertNotEqual(before_increfs, after_increfs)\n self.assertNotEqual(before_decrefs, after_decrefs)\n\n pruned_increfs = set(before_increfs) - set(after_increfs)\n pruned_decrefs = set(before_decrefs) - set(after_decrefs)\n\n # the symm difference == or-combined\n combined = pruned_increfs | pruned_decrefs\n self.assertEqual(combined, pruned_increfs ^ pruned_decrefs)\n pruned_lines = '\\n'.join(combined)\n\n # all GONE lines are pruned\n for i in [1, 2, 3, 4, 5]:\n gone = '; GONE {}'.format(i)\n self.assertIn(gone, pruned_lines)\n # no other lines\n self.assertEqual(len(list(pruned_lines.splitlines())), len(combined))\n\n def test_refct_pruning_with_branches(self):\n '''testcase from #2350'''\n @njit\n def _append_non_na(x, y, agg, field):\n if not np.isnan(field):\n agg[y, x] += 1\n\n @njit\n def _append(x, y, agg, field):\n if not np.isnan(field):\n if np.isnan(agg[y, x]):\n agg[y, x] = field\n else:\n agg[y, x] += field\n\n @njit\n def append(x, y, agg, field):\n _append_non_na(x, y, agg, field)\n _append(x, y, agg, field)\n\n # Disable python wrapper to avoid detecting necessary\n # refcount inside it\n @njit(no_cpython_wrapper=True)\n def extend(arr, field):\n 
for i in range(arr.shape[0]):\n for j in range(arr.shape[1]):\n append(j, i, arr, field)\n\n # Compile\n extend.compile(\"(f4[:,::1], f4)\")\n\n # Test there are no reference count operations\n llvmir = str(extend.inspect_llvm(extend.signatures[0]))\n refops = list(re.finditer(r'(NRT_incref|NRT_decref)\\([^\\)]+\\)', llvmir))\n self.assertEqual(len(refops), 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Core eval alignment algorithms\n\"\"\"\n\nimport warnings\nfrom functools import partial, wraps\nfrom pandas.compat import zip, range\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import compat\nfrom pandas.errors import PerformanceWarning\nfrom pandas.core.common import flatten\nfrom pandas.core.computation.common import _result_type_many\n\n\ndef _align_core_single_unary_op(term):\n if isinstance(term.value, np.ndarray):\n typ = partial(np.asanyarray, dtype=term.value.dtype)\n else:\n typ = type(term.value)\n ret = typ,\n\n if not hasattr(term.value, 'axes'):\n ret += None,\n else:\n ret += _zip_axes_from_type(typ, term.value.axes),\n return ret\n\n\ndef _zip_axes_from_type(typ, new_axes):\n axes = {}\n for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):\n axes[ax_name] = new_axes[ax_ind]\n return axes\n\n\ndef _any_pandas_objects(terms):\n \"\"\"Check a sequence of terms for instances of PandasObject.\"\"\"\n return any(isinstance(term.value, pd.core.generic.PandasObject)\n for term in terms)\n\n\ndef _filter_special_cases(f):\n @wraps(f)\n def wrapper(terms):\n # single unary operand\n if len(terms) == 1:\n return _align_core_single_unary_op(terms[0])\n\n term_values = (term.value for term in terms)\n\n # we don't have any pandas objects\n if not _any_pandas_objects(terms):\n return _result_type_many(*term_values), None\n\n return f(terms)\n return wrapper\n\n\n@_filter_special_cases\ndef _align_core(terms):\n term_index = [i for i, term in enumerate(terms)\n if hasattr(term.value, 'axes')]\n term_dims = [terms[i].value.ndim for i in term_index]\n ndims = pd.Series(dict(zip(term_index, term_dims)))\n\n # initial axes are the axes of the largest-axis'd term\n biggest = terms[ndims.idxmax()].value\n typ = biggest._constructor\n axes = biggest.axes\n naxes = len(axes)\n gt_than_one_axis = naxes > 1\n\n for value in (terms[i].value for i in term_index):\n is_series = isinstance(value, pd.Series)\n is_series_and_gt_one_axis = is_series and gt_than_one_axis\n\n for axis, items in enumerate(value.axes):\n if is_series_and_gt_one_axis:\n ax, itm = naxes - 1, value.index\n else:\n ax, itm = axis, items\n\n if not axes[ax].is_(itm):\n axes[ax] = axes[ax].join(itm, how='outer')\n\n for i, ndim in compat.iteritems(ndims):\n for axis, items in zip(range(ndim), axes):\n ti = terms[i].value\n\n if hasattr(ti, 'reindex_axis'):\n transpose = isinstance(ti, pd.Series) and naxes > 1\n reindexer = axes[naxes - 1] if transpose else items\n\n term_axis_size = len(ti.axes[axis])\n reindexer_size = len(reindexer)\n\n ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))\n if ordm >= 1 and reindexer_size >= 10000:\n warnings.warn('Alignment difference on axis {0} is larger '\n 'than an order of magnitude on term {1!r}, '\n 'by more than {2:.4g}; performance may '\n 'suffer'.format(axis, terms[i].name, ordm),\n category=PerformanceWarning,\n stacklevel=6)\n\n if transpose:\n f = partial(ti.reindex, index=reindexer, copy=False)\n else:\n f = partial(ti.reindex_axis, reindexer, axis=axis,\n copy=False)\n\n terms[i].update(f())\n\n terms[i].update(terms[i].value.values)\n\n return typ, _zip_axes_from_type(typ, axes)\n\n\ndef _align(terms):\n \"\"\"Align a set of terms\"\"\"\n try:\n # flatten the parse tree (a nested list, really)\n terms = list(flatten(terms))\n except TypeError:\n # can't iterate so it must just be a constant or single variable\n if isinstance(terms.value, pd.core.generic.NDFrame):\n typ = type(terms.value)\n return typ, _zip_axes_from_type(typ, 
terms.value.axes)\n return np.result_type(terms.type), None\n\n # if all resolved variables are numeric scalars\n if all(term.isscalar for term in terms):\n return _result_type_many(*(term.value for term in terms)).type, None\n\n # perform the main alignment\n typ, axes = _align_core(terms)\n return typ, axes\n\n\ndef _reconstruct_object(typ, obj, axes, dtype):\n \"\"\"Reconstruct an object given its type, raw value, and possibly empty\n (None) axes.\n\n Parameters\n ----------\n typ : object\n A type\n obj : object\n The value to use in the type constructor\n axes : dict\n The axes to use to construct the resulting pandas object\n\n Returns\n -------\n ret : typ\n An object of type ``typ`` with the value `obj` and possible axes\n `axes`.\n \"\"\"\n try:\n typ = typ.type\n except AttributeError:\n pass\n\n res_t = np.result_type(obj.dtype, dtype)\n\n if (not isinstance(typ, partial) and\n issubclass(typ, pd.core.generic.PandasObject)):\n return typ(obj, dtype=res_t, **axes)\n\n # special case for pathological things like ~True/~False\n if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:\n ret_value = res_t.type(obj)\n else:\n ret_value = typ(obj).astype(res_t)\n # The condition is to distinguish 0-dim array (returned in case of\n # scalar) and 1 element array\n # e.g. np.array(0) and np.array([0])\n if len(obj.shape) == 1 and len(obj) == 1:\n if not isinstance(ret_value, np.ndarray):\n ret_value = np.array([ret_value]).astype(res_t)\n\n return ret_value\n",
"\nfrom __future__ import print_function, absolute_import, division\n\nimport collections\n\nimport numpy as np\n\nfrom numba import types\n\n\nQuicksortImplementation = collections.namedtuple(\n 'QuicksortImplementation',\n (# The compile function itself\n 'compile',\n # All subroutines exercised by test_sort\n 'partition', 'partition3', 'insertion_sort',\n # The top-level function\n 'run_quicksort',\n ))\n\n\nPartition = collections.namedtuple('Partition', ('start', 'stop'))\n\n# Under this size, switch to a simple insertion sort\nSMALL_QUICKSORT = 15\n\nMAX_STACK = 100\n\n\ndef make_quicksort_impl(wrap, lt=None, is_argsort=False):\n\n intp = types.intp\n zero = intp(0)\n\n # Two subroutines to make the core algorithm generic wrt. argsort\n # or normal sorting. Note the genericity may make basic sort()\n # slightly slower (~5%)\n if is_argsort:\n @wrap\n def make_res(A):\n return np.arange(A.size)\n\n @wrap\n def GET(A, idx_or_val):\n return A[idx_or_val]\n\n else:\n @wrap\n def make_res(A):\n return A\n\n @wrap\n def GET(A, idx_or_val):\n return idx_or_val\n\n def default_lt(a, b):\n \"\"\"\n Trivial comparison function between two keys.\n \"\"\"\n return a < b\n\n LT = wrap(lt if lt is not None else default_lt)\n\n @wrap\n def insertion_sort(A, R, low, high):\n \"\"\"\n Insertion sort A[low:high + 1]. Note the inclusive bounds.\n \"\"\"\n assert low >= 0\n if high <= low:\n return\n\n for i in range(low + 1, high + 1):\n k = R[i]\n v = GET(A, k)\n # Insert v into A[low:i]\n j = i\n while j > low and LT(v, GET(A, R[j - 1])):\n # Make place for moving A[i] downwards\n R[j] = R[j - 1]\n j -= 1\n R[j] = k\n\n @wrap\n def partition(A, R, low, high):\n \"\"\"\n Partition A[low:high + 1] around a chosen pivot. The pivot's index\n is returned.\n \"\"\"\n assert low >= 0\n assert high > low\n\n mid = (low + high) >> 1\n # NOTE: the pattern of swaps below for the pivot choice and the\n # partitioning gives good results (i.e. regular O(n log n))\n # on sorted, reverse-sorted, and uniform arrays. 
Subtle changes\n # risk breaking this property.\n\n # median of three {low, middle, high}\n if LT(GET(A, R[mid]), GET(A, R[low])):\n R[low], R[mid] = R[mid], R[low]\n if LT(GET(A, R[high]), GET(A, R[mid])):\n R[high], R[mid] = R[mid], R[high]\n if LT(GET(A, R[mid]), GET(A, R[low])):\n R[low], R[mid] = R[mid], R[low]\n pivot = GET(A, R[mid])\n\n # Temporarily stash the pivot at the end\n R[high], R[mid] = R[mid], R[high]\n i = low\n j = high - 1\n while True:\n while i < high and LT(GET(A, R[i]), pivot):\n i += 1\n while j >= low and LT(pivot, GET(A, R[j])):\n j -= 1\n if i >= j:\n break\n R[i], R[j] = R[j], R[i]\n i += 1\n j -= 1\n # Put the pivot back in its final place (all items before `i`\n # are smaller than the pivot, all items at/after `i` are larger)\n R[i], R[high] = R[high], R[i]\n return i\n\n @wrap\n def partition3(A, low, high):\n \"\"\"\n Three-way partition [low, high) around a chosen pivot.\n A tuple (lt, gt) is returned such that:\n - all elements in [low, lt) are < pivot\n - all elements in [lt, gt] are == pivot\n - all elements in (gt, high] are > pivot\n \"\"\"\n mid = (low + high) >> 1\n # median of three {low, middle, high}\n if LT(A[mid], A[low]):\n A[low], A[mid] = A[mid], A[low]\n if LT(A[high], A[mid]):\n A[high], A[mid] = A[mid], A[high]\n if LT(A[mid], A[low]):\n A[low], A[mid] = A[mid], A[low]\n pivot = A[mid]\n\n A[low], A[mid] = A[mid], A[low]\n lt = low\n gt = high\n i = low + 1\n while i <= gt:\n if LT(A[i], pivot):\n A[lt], A[i] = A[i], A[lt]\n lt += 1\n i += 1\n elif LT(pivot, A[i]):\n A[gt], A[i] = A[i], A[gt]\n gt -= 1\n else:\n i += 1\n return lt, gt\n\n @wrap\n def run_quicksort(A):\n R = make_res(A)\n\n if len(A) < 2:\n return R\n\n stack = [Partition(zero, zero)] * MAX_STACK\n stack[0] = Partition(zero, len(A) - 1)\n n = 1\n\n while n > 0:\n n -= 1\n low, high = stack[n]\n # Partition until it becomes more efficient to do an insertion sort\n while high - low >= SMALL_QUICKSORT:\n assert n < MAX_STACK\n i = partition(A, R, low, high)\n # Push largest partition on the stack\n if high - i > i - low:\n # Right is larger\n if high > i:\n stack[n] = Partition(i + 1, high)\n n += 1\n high = i - 1\n else:\n if i > low:\n stack[n] = Partition(low, i - 1)\n n += 1\n low = i + 1\n\n insertion_sort(A, R, low, high)\n\n return R\n\n # Unused quicksort implementation based on 3-way partitioning; the\n # partitioning scheme turns out exhibiting bad behaviour on sorted arrays.\n @wrap\n def _run_quicksort(A):\n stack = [Partition(zero, zero)] * 100\n stack[0] = Partition(zero, len(A) - 1)\n n = 1\n\n while n > 0:\n n -= 1\n low, high = stack[n]\n # Partition until it becomes more efficient to do an insertion sort\n while high - low >= SMALL_QUICKSORT:\n assert n < MAX_STACK\n l, r = partition3(A, low, high)\n # One trivial (empty) partition => iterate on the other\n if r == high:\n high = l - 1\n elif l == low:\n low = r + 1\n # Push largest partition on the stack\n elif high - r > l - low:\n # Right is larger\n stack[n] = Partition(r + 1, high)\n n += 1\n high = l - 1\n else:\n stack[n] = Partition(low, l - 1)\n n += 1\n low = r + 1\n\n insertion_sort(A, low, high)\n\n\n return QuicksortImplementation(wrap,\n partition, partition3, insertion_sort,\n run_quicksort)\n\n\ndef make_py_quicksort(*args, **kwargs):\n return make_quicksort_impl((lambda f: f), *args, **kwargs)\n\ndef make_jit_quicksort(*args, **kwargs):\n from numba.extending import register_jitable\n return make_quicksort_impl((lambda f: register_jitable(f)),\n *args, **kwargs)\n",
"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom warnings import catch_warnings\nimport numpy as np\n\nimport pandas\nfrom pandas.core import common as com\nfrom pandas.api import types\nfrom pandas.util import testing as tm\n\nfrom .test_api import Base\n\n\nclass TestTypes(Base):\n\n allowed = ['is_bool', 'is_bool_dtype',\n 'is_categorical', 'is_categorical_dtype', 'is_complex',\n 'is_complex_dtype', 'is_datetime64_any_dtype',\n 'is_datetime64_dtype', 'is_datetime64_ns_dtype',\n 'is_datetime64tz_dtype', 'is_datetimetz', 'is_dtype_equal',\n 'is_extension_type', 'is_float', 'is_float_dtype',\n 'is_int64_dtype', 'is_integer',\n 'is_integer_dtype', 'is_number', 'is_numeric_dtype',\n 'is_object_dtype', 'is_scalar', 'is_sparse',\n 'is_string_dtype', 'is_signed_integer_dtype',\n 'is_timedelta64_dtype', 'is_timedelta64_ns_dtype',\n 'is_unsigned_integer_dtype', 'is_period',\n 'is_period_dtype', 'is_interval', 'is_interval_dtype',\n 'is_re', 'is_re_compilable',\n 'is_dict_like', 'is_iterator', 'is_file_like',\n 'is_list_like', 'is_hashable',\n 'is_named_tuple',\n 'pandas_dtype', 'union_categoricals', 'infer_dtype']\n deprecated = ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']\n dtypes = ['CategoricalDtype', 'DatetimeTZDtype',\n 'PeriodDtype', 'IntervalDtype']\n\n def test_types(self):\n\n self.check(types, self.allowed + self.dtypes + self.deprecated)\n\n def check_deprecation(self, fold, fnew):\n with tm.assert_produces_warning(DeprecationWarning):\n try:\n result = fold('foo')\n expected = fnew('foo')\n assert result == expected\n except TypeError:\n pytest.raises(TypeError, lambda: fnew('foo'))\n except AttributeError:\n pytest.raises(AttributeError, lambda: fnew('foo'))\n\n def test_deprecation_core_common(self):\n\n # test that we are in fact deprecating\n # the pandas.core.common introspectors\n for t in self.allowed:\n self.check_deprecation(getattr(com, t), getattr(types, t))\n\n def test_deprecation_core_common_array_equivalent(self):\n\n with tm.assert_produces_warning(DeprecationWarning):\n com.array_equivalent(np.array([1, 2]), np.array([1, 2]))\n\n def test_deprecation_core_common_moved(self):\n\n # these are in pandas.core.dtypes.common\n l = ['is_datetime_arraylike',\n 'is_datetime_or_timedelta_dtype',\n 'is_datetimelike',\n 'is_datetimelike_v_numeric',\n 'is_datetimelike_v_object',\n 'is_datetimetz',\n 'is_int_or_datetime_dtype',\n 'is_period_arraylike',\n 'is_string_like',\n 'is_string_like_dtype']\n\n from pandas.core.dtypes import common as c\n for t in l:\n self.check_deprecation(getattr(com, t), getattr(c, t))\n\n def test_removed_from_core_common(self):\n\n for t in ['is_null_datelike_scalar',\n 'ensure_float']:\n pytest.raises(AttributeError, lambda: getattr(com, t))\n\n def test_deprecated_from_api_types(self):\n\n for t in self.deprecated:\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n getattr(types, t)(1)\n\n\ndef test_moved_infer_dtype():\n\n with catch_warnings(record=True):\n e = pandas.lib.infer_dtype('foo')\n assert e is not None\n"
] | [
[
"numpy.arange",
"numpy.ones"
],
[
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.zeros_like"
],
[
"numpy.testing.assert_equal",
"numpy.distutils.misc_util.gpaths",
"numpy.testing.run_module_suite",
"numpy.distutils.misc_util.minrelpath",
"numpy.distutils.misc_util.get_shared_lib_extension",
"numpy.distutils.misc_util.get_info",
"numpy.distutils.misc_util.appendpath"
],
[
"pandas.to_datetime",
"pandas.MultiIndex",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.MultiIndex.from_arrays",
"pandas.date_range",
"pandas.compat.lrange",
"numpy.random.randint"
],
[
"numpy.can_cast",
"numpy.asarray",
"numpy.arange",
"numpy.issubdtype",
"numpy.dtype",
"numpy.atleast_1d",
"numpy.ndim",
"numpy.iinfo",
"numpy.isscalar",
"numpy.broadcast_arrays",
"numpy.find_common_type",
"numpy.array"
],
[
"pandas.Series",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.round_trip_pickle",
"numpy.negative",
"numpy.divide",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.equalContents",
"pandas.util.testing.assert_attr_equal",
"numpy.multiply",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.date_range",
"pandas.util.testing.makeTimedeltaIndex",
"numpy.array",
"pandas.timedelta_range",
"numpy.absolute",
"pandas.TimedeltaIndex",
"pandas.util.testing.assert_raises_regex",
"pandas.to_timedelta",
"pandas.offsets.Hour"
],
[
"numpy.dot",
"numpy.einsum",
"numpy.linspace",
"numpy.polynomial.polynomial.polyfit",
"numpy.polynomial.polynomial.polymulx",
"numpy.polynomial.polynomial.polyval2d",
"numpy.polynomial.polynomial.polygrid2d",
"numpy.polynomial.polynomial.polytrim",
"numpy.zeros_like",
"numpy.polynomial.polynomial.polyfromroots",
"numpy.polynomial.polynomial.polyvander3d",
"numpy.random.randint",
"numpy.testing.assert_equal",
"numpy.polynomial.polynomial.polysub",
"numpy.polynomial.polynomial.polydiv",
"numpy.arange",
"numpy.testing.assert_almost_equal",
"numpy.polynomial.polynomial.polyder",
"numpy.zeros",
"numpy.polynomial.polynomial.polyvander2d",
"numpy.polynomial.polynomial.polyval3d",
"numpy.polynomial.polynomial.polyint",
"numpy.polynomial.polynomial.polyadd",
"numpy.polynomial.polynomial.polyval",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.polynomial.polynomial.polygrid3d",
"numpy.array",
"numpy.polynomial.polynomial.polycompanion",
"numpy.testing.run_module_suite",
"numpy.random.random",
"numpy.polynomial.polynomial.polyvalfromroots",
"numpy.polynomial.polynomial.polyroots",
"numpy.empty",
"numpy.ones",
"numpy.polynomial.polynomial.polyvander",
"numpy.polynomial.polynomial.polymul",
"numpy.polynomial.polynomial.polyline",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.zeros_like",
"numpy.testing.assert_allclose"
],
[
"numpy.matrix",
"numpy.complex128",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"numpy.object_",
"pandas.Series",
"pandas.PeriodIndex",
"numpy.bool",
"pandas._libs.lib.maybe_convert_numeric",
"pandas.core.dtypes.inference.is_hashable",
"pandas.DataFrame",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.lib.is_timedelta_array",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.str_",
"pandas.core.dtypes.inference.is_list_like",
"numpy.bool_",
"pandas.core.dtypes.common.is_timedelta64_ns_dtype",
"pandas.core.dtypes.missing.isnull",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.arange",
"pandas.compat.StringIO",
"pandas.core.dtypes.common._ensure_int32",
"pandas.core.dtypes.inference.is_named_tuple",
"pandas.util.testing._skip_if_no_scipy",
"pandas.Index",
"pandas.core.dtypes.inference.is_re_compilable",
"pandas._libs.lib.is_datetime64_array",
"pandas._libs.lib.is_timedelta64_array",
"pandas._libs.lib.is_timedelta_or_timedelta64_array",
"pandas.core.dtypes.common.is_number",
"numpy.bytes_",
"numpy.float32",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common._ensure_categorical",
"pandas.Panel",
"pandas.compat.u",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.Categorical",
"pandas.core.dtypes.inference.is_dict_like",
"pandas._libs.lib.is_datetime_array",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.isnan",
"pandas.Timedelta",
"numpy.timedelta64",
"numpy.int64",
"pandas._libs.lib.to_object_array",
"pandas.date_range",
"numpy.array",
"pandas.core.dtypes.common.is_bool",
"pandas._libs.lib.isposinf_scalar",
"pandas.TimedeltaIndex",
"pandas._libs.lib.to_object_array_tuples",
"pandas._libs.lib.is_period",
"pandas.util.testing.assert_raises_regex",
"pandas.core.dtypes.common.is_scalar",
"numpy.int32",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.lib.isneginf_scalar",
"numpy.datetime64",
"numpy.uint64",
"numpy.float64",
"pandas._libs.lib.maybe_convert_objects",
"pandas.Period",
"pandas.compat.lrange",
"pandas._libs.lib.infer_dtype",
"pandas.core.dtypes.inference.is_nested_list_like",
"pandas.Timestamp",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.dtypes.inference.is_re"
],
[
"pandas.util.testing.skip_if_no_package",
"pandas.Series",
"scipy.stats.rankdata",
"numpy.isnan",
"pandas.compat.product",
"numpy.arange",
"pandas.util.testing._skip_if_no_scipy",
"pandas.util.testing.assert_series_equal",
"numpy.random.shuffle",
"numpy.random.randn",
"pandas.compat.iteritems",
"pandas.date_range",
"numpy.repeat",
"numpy.array"
],
[
"numpy.testing.run_module_suite",
"numpy.random.random",
"numpy.fft.rfftfreq",
"numpy.fft.fftshift",
"numpy.ones",
"numpy.fft.ifftshift",
"numpy.fft.fftfreq",
"numpy.fft.irfftn",
"numpy.zeros",
"numpy.fft.helper._FFTCache",
"numpy.empty"
],
[
"numpy.diag",
"numpy.dot",
"scipy.linalg.pinv",
"scipy._lib._numpy_compat.suppress_warnings",
"numpy.rollaxis",
"numpy.sqrt",
"numpy.issubdtype",
"numpy.dtype",
"scipy.linalg._testutils.assert_no_overwrite",
"scipy.linalg.solveh_banded",
"numpy.random.randn",
"scipy.linalg.solve_triangular",
"numpy.conjugate",
"numpy.tril",
"scipy._lib._version.NumpyVersion",
"numpy.testing.assert_equal",
"numpy.linalg.svd",
"numpy.swapaxes",
"numpy.ones_like",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.empty_like",
"scipy.linalg.lstsq",
"numpy.finfo",
"numpy.testing.assert_almost_equal",
"scipy.linalg.pinv2",
"scipy.linalg.norm",
"scipy.linalg.pinvh",
"scipy.linalg.inv",
"scipy.linalg.solve",
"scipy.linalg.solve_banded",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.linalg.matrix_balance",
"scipy.linalg.det",
"numpy.identity",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.transpose",
"numpy.testing.assert_allclose",
"scipy.linalg.solve_circulant",
"numpy.array",
"numpy.log2",
"numpy.random.random",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.ones",
"numpy.testing.assert_array_equal",
"scipy.linalg.circulant",
"numpy.empty"
],
[
"pandas.compat.builtins.range",
"pandas.compat.lzip",
"pandas.compat.map",
"pandas.compat.lmap",
"pandas.compat.builtins.map",
"pandas.compat.zip",
"pandas.compat.builtins.filter",
"pandas.compat.builtins.zip",
"pandas.compat.iterkeys",
"pandas.compat.itervalues",
"pandas.compat.iteritems",
"pandas.compat.filter",
"pandas.compat.lfilter",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"pandas._libs.lib.try_parse_datetime_components",
"pandas._libs.lib.try_parse_date_and_time",
"pandas.compat.map",
"pandas._libs.lib.try_parse_year_month_day",
"numpy.array",
"numpy.empty",
"pandas.compat.range"
],
[
"scipy.special.pdtr",
"scipy.special.zeta",
"numpy.minimum",
"numpy.sqrt",
"numpy.asarray",
"scipy.special.pdtrc",
"numpy.exp",
"numpy.where",
"numpy.polyval",
"scipy.special.nbdtrc",
"scipy.special.entr",
"numpy.ones_like",
"scipy.special.bdtrik",
"numpy.arange",
"numpy.ceil",
"scipy.special.nbdtrik",
"numpy.log1p",
"scipy._lib._numpy_compat.broadcast_to",
"scipy.special.betainc",
"scipy.special.pdtrik",
"numpy.log",
"scipy.special.xlog1py",
"numpy.cosh",
"numpy.power",
"scipy.special.log1p",
"scipy.special.betaln",
"numpy.floor",
"scipy.special.gammaln",
"numpy.tanh",
"scipy.special.xlogy",
"scipy.special.bdtrc",
"numpy.maximum",
"numpy.expm1",
"numpy.sinh",
"numpy.vectorize",
"scipy.special.bdtr"
],
[
"pandas.core.computation.expressions.set_use_numexpr",
"pandas.core.computation.expressions.set_test_mode",
"pandas.core.computation.expressions.evaluate",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.core.computation.expressions.where",
"numpy.where",
"numpy.random.randint",
"pandas.util.testing.makePanel4D",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_series_equal",
"pandas.core.computation.expressions.set_numexpr_threads",
"pandas.util.testing.use_numexpr",
"numpy.random.rand",
"pandas.core.computation.expressions.get_test_result",
"pandas.util.testing.assert_raises_regex",
"numpy.shape",
"pandas.core.computation.expressions._can_use_numexpr",
"pandas.io.formats.printing.pprint_thing",
"numpy.empty"
],
[
"numpy.zeros"
],
[
"scipy.io.matlab.miobase.matdims",
"scipy._lib._numpy_compat.suppress_warnings",
"numpy.sqrt",
"scipy.io.matlab.mio.mat_reader_factory",
"numpy.ndarray",
"numpy.dtype",
"numpy.all",
"numpy.exp",
"scipy.io.matlab.mio5.varmats_from_mat",
"numpy.testing.assert_equal",
"scipy.sparse.coo_matrix",
"scipy.sparse.issparse",
"scipy.io.matlab.mio.whosmat",
"numpy.arange",
"scipy.io.matlab.mio5.to_writeable",
"numpy.eye",
"scipy.io.matlab.mio.loadmat",
"numpy.sin",
"numpy.repeat",
"scipy._lib.six.u",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.sparse.csc_matrix",
"scipy.io.matlab.mio5.MatFile5Writer",
"scipy.io.matlab.mio5.MatFile5Reader",
"numpy.testing.assert_",
"numpy.array",
"numpy.cos",
"numpy.testing.assert_array_equal",
"numpy.empty",
"scipy.io.matlab.mio.savemat"
],
[
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.isnan",
"numpy.arange",
"numpy.ndarray",
"numpy.dtype",
"numpy.ones",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
],
[
"pandas.core.common.flatten",
"pandas.core.computation.common._result_type_many",
"numpy.result_type",
"pandas.compat.zip",
"pandas.compat.iteritems",
"numpy.array",
"pandas.compat.range"
],
[
"numpy.arange"
],
[
"pandas.lib.infer_dtype",
"numpy.array",
"pandas.util.testing.assert_produces_warning"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
basaks/uncover-ml | [
"167af7666845e2f0936aa4fc0e60abf8b1984219"
] | [
"scripts/intersect_rasters.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport rasterio\nimport geopandas as gpd\nfrom joblib import Parallel, delayed\n\ndata_location = \\\n Path(\"/g/data/ge3/covariates/national_albers_filled_new/albers_cropped/\")\n# Read points from shapefile\n\nshapefile_location = Path(\"/g/data/ge3/aem_sections/AEM_covariates/\")\n\n# local\n# k = data_location.joinpath('data', 'LATITUDE_GRID1.tif')\n# shapefile_location = Path(\"configs/data\")\n# shp = shapefile_location.joinpath('geochem_sites.shp')\n\ngeotifs = {\n \"relief_radius4.tif\": \"relief4\",\n \"national_Wii_RF_multirandomforest_prediction.tif\": \"mrf_pred\",\n \"MvrtpLL_smooth.tif\": \"mrvtpLL_s\",\n \"MvrtpLL_fin.tif\": \"mvrtpLL_f\",\n \"LOC_distance_to_coast.tif\": \"LOC_dis\",\n \"Gravity_land.tif\": \"gravity\",\n \"dem_fill.tif\": \"dem\",\n \"Clim_Prescott_LindaGregory.tif\": \"clim_linda\",\n \"clim_PTA_albers.tif\": \"clim_alber\",\n \"SagaWET9cell_M.tif\": \"sagawet\",\n \"ceno_euc_aust1.tif\": \"ceno_euc\"\n}\n\n\ndownscale_factor = 2 # keep 1 point in a 2x2 cell\n\n\ndef intersect_and_sample_shp(shp: Path):\n print(\"====================================\\n\", f\"intersecting {shp.as_posix()}\")\n pts = gpd.read_file(shp)\n coords = np.array([(p.x, p.y) for p in pts.geometry])\n tif_name = list(geotifs.keys())[0]\n tif = data_location.joinpath(tif_name)\n orig_cols = pts.columns\n with rasterio.open(tif) as src:\n # resample data to target shape\n data = src.read(\n out_shape=(\n src.count,\n int(src.height / downscale_factor),\n int(src.width / downscale_factor)\n ),\n resampling=rasterio.enums.Resampling.bilinear\n )\n # scale image transform\n transform = src.transform * src.transform.scale(\n (src.width / data.shape[-1]),\n (src.height / data.shape[-2])\n )\n pts[\"rows\"], pts[\"cols\"] = rasterio.transform.rowcol(transform, coords[:, 0], coords[:, 1])\n\n pts_deduped = pts.drop_duplicates(subset=['rows', 'cols'])[orig_cols]\n coords_deduped = np.array([(p.x, p.y) for p in pts_deduped.geometry])\n\n for k, v in geotifs.items():\n print(f\"adding {k} to output dataframe\")\n with rasterio.open(data_location.joinpath(k)) as src:\n pts_deduped[v] = [x[0] for x in src.sample(coords_deduped)]\n pts_deduped.to_file(Path('out').joinpath(shp.name))\n # pts.to_csv(Path(\"out\").joinpath(shp.stem + \".csv\"), index=False)\n\n\nrets = Parallel(\n n_jobs=-1,\n verbose=100,\n)(delayed(intersect_and_sample_shp)(s) for s in shapefile_location.glob(\"*.shp\"))\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wnov/vega | [
"19256aca4d047bfad3b461f0a927e1c2abb9eb03",
"19256aca4d047bfad3b461f0a927e1c2abb9eb03"
] | [
"vega/core/pipeline/fully_train_pipe_step.py",
"zeus/datasets/common/cifar100.py"
] | [
"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"Fully Train PipeStep that used in Pipeline.\"\"\"\nimport os\nimport logging\nimport subprocess\nimport pickle\nimport vega\nfrom .pipe_step import PipeStep\nfrom zeus.common import ClassFactory, ClassType\nfrom zeus.common import FileOps, TaskOps\nfrom ..scheduler import create_master\nfrom zeus.common.general import General\nfrom zeus.report import Report, ReportRecord\nfrom vega.core.pipeline.conf import PipeStepConfig, PipelineConfig\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](ClassType.PIPE_STEP)\nclass FullyTrainPipeStep(PipeStep):\n \"\"\"FullyTrainPipeStep is the implementation class of PipeStep.\n\n Fully train is the last pipe step in pipeline, we provide horovrd or local trainer\n for user to choose.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n logger.info(\"init FullyTrainPipeStep...\")\n\n def do(self):\n \"\"\"Start to run fully train with horovod or local trainer.\"\"\"\n logger.info(\"FullyTrainPipeStep started...\")\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n if cls_trainer.config.distributed:\n self._do_distributed_fully_train()\n else:\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.master = create_master(update_func=Report().update_report)\n self._train_multi_models(records)\n Report().output_step_all_records(\n step_name=self.task.step_name, weights_file=True, performance=True)\n self.master.close_client()\n Report().backup_output_path()\n\n def _get_current_step_records(self):\n step_name = self.task.step_name\n models_folder = PipeStepConfig.pipe_step.get(\"models_folder\")\n records = []\n cur_index = PipelineConfig.steps.index(step_name)\n if cur_index >= 1 or models_folder:\n # records = Report().get_pareto_front_records(PipelineConfig.steps[cur_index - 1])\n if not models_folder:\n models_folder = FileOps.join_path(\n TaskOps().local_output_path, PipelineConfig.steps[cur_index - 1])\n models_folder = models_folder.replace(\n \"{local_base_path}\", TaskOps().local_base_path)\n records = Report().load_records_from_model_folder(models_folder)\n else:\n records = [ReportRecord(step_name, 0)]\n logging.debug(\"Records: {}\".format(records))\n for record in records:\n record.step_name = step_name\n return records\n\n def _train_single_model(self, model_desc=None, model_id=None):\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n step_name = self.task.step_name\n if model_desc is not None:\n sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name)\n record = ReportRecord().load_dict(sample)\n logging.debug(\"Broadcast Record=%s\", str(record))\n Report().broadcast(record)\n trainer = cls_trainer(model_desc=model_desc, id=model_id)\n else:\n trainer = cls_trainer(None, 0)\n # resume training\n if vega.is_torch_backend() and General._resume:\n trainer.load_checkpoint = True\n trainer._resume_training = True\n if cls_trainer.config.distributed:\n self._do_distributed_fully_train()\n else:\n self._do_single_fully_train(trainer)\n\n def _train_single_gpu_model(self, trainer):\n evaluator = 
self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n\n def _train_single_npu_model(self, trainer):\n temp_rank_file = os.environ.get('RANK_TABLE_FILE', None)\n temp_rank_size = os.environ['RANK_SIZE']\n os.environ.pop('RANK_TABLE_FILE', None)\n os.environ['RANK_SIZE'] = '1'\n evaluator = self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n if temp_rank_file is not None:\n os.environ['RANK_TABLE_FILE'] = temp_rank_file\n os.environ['RANK_SIZE'] = temp_rank_size\n\n def _do_single_fully_train(self, trainer):\n if os.environ['DEVICE_CATEGORY'] == 'GPU':\n self._train_single_gpu_model(trainer)\n elif os.environ['DEVICE_CATEGORY'] == 'NPU':\n self._train_single_npu_model(trainer)\n\n def _train_multi_models(self, records):\n for record in records:\n self._train_single_model(record.desc, record.worker_id)\n self.master.join()\n\n def _get_evaluator(self, worker_id):\n if not PipeStepConfig.evaluator_enable:\n return None\n cls_evaluator = ClassFactory.get_cls('evaluator', \"Evaluator\")\n evaluator = cls_evaluator({\"step_name\": self.task.step_name, \"worker_id\": worker_id})\n return evaluator\n\n def _do_horovod_fully_train(self):\n pwd_dir = os.path.dirname(os.path.abspath(__file__))\n cf_file = os.path.join(pwd_dir, 'cf.pickle')\n cf_content = {'registry': ClassFactory.__registry__,\n 'general_config': General().to_json(),\n 'pipe_step_config': PipeStepConfig().to_json()}\n with open(cf_file, 'wb') as f:\n pickle.dump(cf_content, f)\n cf_file_remote = os.path.join(self.task.local_base_path, 'cf.pickle')\n FileOps.copy_file(cf_file, cf_file_remote)\n if os.environ.get('DLS_TASK_NUMBER') is None:\n # local cluster\n worker_ips = '127.0.0.1'\n if General.cluster.master_ip is not None and General.cluster.master_ip != '127.0.0.1':\n worker_ips = General.cluster.master_ip\n for ip in General.cluster.slaves:\n worker_ips = worker_ips + ',' + ip\n cmd = ['bash', '{}/horovod/run_cluster_horovod_train.sh'.format(pwd_dir),\n str(self.world_device_size), cf_file_remote, worker_ips]\n else:\n # Roma\n cmd = ['bash', '{}/horovod/run_horovod_train.sh'.format(pwd_dir),\n str(self.world_device_size), cf_file_remote]\n proc = subprocess.Popen(cmd, env=os.environ)\n proc.wait()\n\n def _do_hccl_fully_train(self):\n origin_parallel_fully_train = General.parallel_fully_train\n General.parallel_fully_train = True\n General.dft = True\n cls_trainer = ClassFactory.get_cls('trainer', \"Trainer\")\n self.master = create_master()\n workers_num = int(os.environ['RANK_SIZE'])\n for i in range(workers_num):\n trainer = cls_trainer(None, id=i)\n evaluator = self._get_evaluator(trainer.worker_id)\n self.master.run(trainer, evaluator)\n self.master.join()\n self.master.shutdown()\n General.parallel_fully_train = origin_parallel_fully_train\n General.dft = False\n\n def _do_distributed_fully_train(self):\n if os.environ['DEVICE_CATEGORY'] == 'GPU':\n self._do_horovod_fully_train()\n elif os.environ['DEVICE_CATEGORY'] == 'NPU':\n self._do_hccl_fully_train()\n\n @property\n def world_device_size(self):\n \"\"\"World device size is world size * device count in each world.\"\"\"\n import torch\n world_size = General.env.world_size\n device_nums = torch.cuda.device_count()\n num_devices = world_size * device_nums\n return num_devices\n",
"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"This is a class for Cifar100 dataset.\"\"\"\nfrom .utils.dataset import Dataset\nfrom zeus.datasets.transforms import Compose\nfrom zeus.common import ClassFactory, ClassType\nfrom zeus.common import FileOps\nfrom zeus.datasets.conf.cifar100 import Cifar100Config\nimport numpy as np\nimport os\nimport pickle\nfrom PIL import Image\n\n\[email protected](ClassType.DATASET)\nclass Cifar100(Dataset):\n \"\"\"This is a class for Cifar100 dataset.\n\n :param mode: `train`,`val` or `test`, defaults to `train`\n :type mode: str, optional\n :param cfg: the config the dataset need, defaults to None, and if the cfg is None,\n the default config will be used, the default config file is a yml file with the same name of the class\n :type cfg: yml, py or dict\n \"\"\"\n\n config = Cifar100Config()\n\n def __init__(self, **kwargs):\n \"\"\"Construct the Cifar10 class.\"\"\"\n Dataset.__init__(self, **kwargs)\n self.args.data_path = FileOps.download_dataset(self.args.data_path)\n is_train = self.mode == 'train' or self.mode == 'val' and self.args.train_portion < 1\n self.base_folder = 'cifar-100-python'\n self.transform = Compose(self.transforms.__transform__)\n if is_train:\n files_list = [\"train\"]\n else:\n files_list = ['test']\n\n self.data = []\n self.targets = []\n\n # now load the picked numpy arrays\n for file_name in files_list:\n file_path = os.path.join(self.args.data_path, self.base_folder, file_name)\n with open(file_path, 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n self.data.append(entry['data'])\n if 'labels' in entry:\n self.targets.extend(entry['labels'])\n else:\n self.targets.extend(entry['fine_labels'])\n\n self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)\n self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC\n\n def __getitem__(self, index):\n \"\"\"Get an item of the dataset according to the index.\n\n :param index: index\n :type index: int\n :return: an item of the dataset according to the index\n :rtype: tuple\n \"\"\"\n img, target = self.data[index], self.targets[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def __len__(self):\n \"\"\"Get the length of the dataset.\n\n :return: the length of the dataset\n :rtype: int\n \"\"\"\n return len(self.data)\n\n @property\n def input_channels(self):\n \"\"\"Input channels of the cifar100 image.\n\n :return: the input channels\n :rtype: int\n \"\"\"\n _shape = self.data.shape\n _input_channels = 3 if len(_shape) == 4 else 1\n return _input_channels\n\n @property\n def input_size(self):\n \"\"\"Input size of cifar100 image.\n\n :return: the input size\n :rtype: int\n \"\"\"\n _shape = self.data.shape\n return _shape[1]\n"
] | [
[
"torch.cuda.device_count"
],
[
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
expoli/Learn-tensorflow | [
"cc6b30c233678cf8a6f5da97fdf02ff49e810e61",
"cc6b30c233678cf8a6f5da97fdf02ff49e810e61"
] | [
"BEGINNER/ML_basics_with_Keras/Regression/Predict_fuel_efficiency.py",
"BEGINNER/ML_basics_with_Keras/Overfit_and_underfit/Overfit_and_underfit.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nprint(tf.__version__)\n\ndataset_path = keras.utils.get_file(\"auto-mpg.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data\")\ndataset_path\n\ncolumn_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',\n 'Acceleration', 'Model Year', 'Origin']\nraw_dataset = pd.read_csv(dataset_path, names=column_names,\n na_values=\"?\", comment='\\t',\n sep=\" \", skipinitialspace=True)\n\ndataset = raw_dataset.copy()\ndataset.tail()\n\ndataset.isna().sum()\n\ndataset = dataset.dropna()\n\norigin = dataset.pop('Origin')\n\ndataset['USA'] = (origin == 1) * 1.0\ndataset['Europe'] = (origin == 2) * 1.0\ndataset['Japan'] = (origin == 3) * 1.0\ndataset.tail()\n\ntrain_dataset = dataset.sample(frac=0.8, random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)\n\nsns.pairplot(train_dataset[[\"MPG\", \"Cylinders\", \"Displacement\", \"Weight\"]], diag_kind=\"kde\")\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"MPG\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n\ntrain_labels = train_dataset.pop('MPG')\ntest_labels = test_dataset.pop('MPG')\n\n\ndef norm(x):\n return (x - train_stats['mean']) / train_stats['std']\n\n\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),\n layers.Dense(64, activation='relu'),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n model.compile(loss='mse',\n optimizer=optimizer,\n metrics=['mae', 'mse'])\n return model\n\n\nmodel = build_model()\n\nmodel.summary()\n\nexample_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result\n\n\n# 通过为每个完成的时期打印一个点来显示训练进度\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, train_labels,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n\nhist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()\n\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [MPG]')\n plt.plot(hist['epoch'], hist['mae'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mae'],\n label='Val Error')\n plt.ylim([0, 5])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$MPG^2$]')\n plt.plot(hist['epoch'], hist['mse'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mse'],\n label='Val Error')\n plt.ylim([0, 20])\n plt.legend()\n plt.show()\n\n\nplot_history(history)\n\nmodel = build_model()\n\n# patience 值用来检查改进 epochs 的数量\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, train_labels, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history(history)\n\nloss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} MPG\".format(mae))\n\ntest_predictions = 
model.predict(normed_test_data).flatten()\n\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [MPG]')\nplt.ylabel('Predictions [MPG]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0, plt.xlim()[1]])\nplt.ylim([0, plt.ylim()[1]])\n_ = plt.plot([-100, 100], [-100, 100])\n\nerror = test_predictions - test_labels\nplt.hist(error, bins=25)\nplt.xlabel(\"Prediction Error [MPG]\")\n_ = plt.ylabel(\"Count\")\n",
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\nprint(tf.__version__)\n\n# !pip install -q git+https://github.com/tensorflow/docs\n\n# import tensorflow_docs as tfdocs\n# import tensorflow_docs.modeling\n# import tensorflow_docs.plots\n\nfrom IPython import display\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\n\nimport pathlib\nimport shutil\nimport tempfile\n\nlogdir = pathlib.Path(tempfile.mkdtemp()) / \"tensorboard_logs\"\nshutil.rmtree(logdir, ignore_errors=True)\n\ngz = tf.keras.utils.get_file('HIGGS.csv.gz',\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz')\n\nFEATURES = 28\n\nds = tf.data.experimental.CsvDataset(gz, [float(), ] * (FEATURES + 1), compression_type=\"GZIP\")\n\n\ndef pack_row(*row):\n label = row[0]\n features = tf.stack(row[1:], 1)\n return features, label\n\n\npacked_ds = ds.batch(10000).map(pack_row).unbatch()\n\nfor features, label in packed_ds.batch(1000).take(1):\n print(features[0])\n plt.hist(features.numpy().flatten(), bins=101)\n\nN_VALIDATION = int(1e3)\nN_TRAIN = int(1e4)\nBUFFER_SIZE = int(1e4)\nBATCH_SIZE = 500\nSTEPS_PER_EPOCH = N_TRAIN // BATCH_SIZE\n\nvalidate_ds = packed_ds.take(N_VALIDATION).cache()\ntrain_ds = packed_ds.skip(N_VALIDATION).take(N_TRAIN).cache()\n\ntrain_ds\n\nvalidate_ds = validate_ds.batch(BATCH_SIZE)\ntrain_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)\n\nlr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(\n 0.001,\n decay_steps=STEPS_PER_EPOCH * 1000,\n decay_rate=1,\n staircase=False)\n\n\ndef get_optimizer():\n return tf.keras.optimizers.Adam(lr_schedule)\n\n\nstep = np.linspace(0, 100000)\nlr = lr_schedule(step)\nplt.figure(figsize=(8, 6))\nplt.plot(step / STEPS_PER_EPOCH, lr)\nplt.ylim([0, max(plt.ylim())])\nplt.xlabel('Epoch')\n_ = plt.ylabel('Learning Rate')\n\n\ndef get_callbacks(name):\n return [\n tfdocs.modeling.EpochDots(),\n tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),\n tf.keras.callbacks.TensorBoard(logdir / name),\n ]\n\n\ndef compile_and_fit(model, name, optimizer=None, max_epochs=10000):\n if optimizer is None:\n optimizer = get_optimizer()\n model.compile(optimizer=optimizer,\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[\n tf.keras.losses.BinaryCrossentropy(\n from_logits=True, name='binary_crossentropy'),\n 'accuracy'])\n\n model.summary()\n\n history = model.fit(\n train_ds,\n steps_per_epoch=STEPS_PER_EPOCH,\n epochs=max_epochs,\n validation_data=validate_ds,\n callbacks=get_callbacks(name),\n verbose=0)\n return history\n\n\ntiny_model = tf.keras.Sequential([\n layers.Dense(16, activation='elu', input_shape=(FEATURES,)),\n layers.Dense(1)\n])\n\nsize_histories = {}\n\nsize_histories['Tiny'] = compile_and_fit(tiny_model, 'sizes/Tiny')\n\nplotter = tfdocs.plots.HistoryPlotter(metric='binary_crossentropy', smoothing_std=10)\nplotter.plot(size_histories)\nplt.ylim([0.5, 0.7])\n\nsmall_model = tf.keras.Sequential([\n # `input_shape` is only required here so that `.summary` works.\n layers.Dense(16, activation='elu', input_shape=(FEATURES,)),\n layers.Dense(16, activation='elu'),\n layers.Dense(1)\n])\n\nsize_histories['Small'] = compile_and_fit(small_model, 'sizes/Small')\n\nmedium_model = tf.keras.Sequential([\n layers.Dense(64, activation='elu', input_shape=(FEATURES,)),\n layers.Dense(64, activation='elu'),\n layers.Dense(64, activation='elu'),\n 
layers.Dense(1)\n])\n\nsize_histories['Medium'] = compile_and_fit(medium_model, \"sizes/Medium\")\n\nlarge_model = tf.keras.Sequential([\n layers.Dense(512, activation='elu', input_shape=(FEATURES,)),\n layers.Dense(512, activation='elu'),\n layers.Dense(512, activation='elu'),\n layers.Dense(512, activation='elu'),\n layers.Dense(1)\n])\n\nsize_histories['large'] = compile_and_fit(large_model, \"sizes/large\")\n\nplotter.plot(size_histories)\na = plt.xscale('log')\nplt.xlim([5, max(plt.xlim())])\nplt.ylim([0.5, 0.7])\nplt.xlabel(\"Epochs [Log Scale]\")\n\ndisplay.IFrame(\n src=\"https://tensorboard.dev/experiment/vW7jmmF9TmKmy3rbheMQpw/#scalars&_smoothingWeight=0.97\",\n width=\"100%\", height=\"800px\")\n\nshutil.rmtree(logdir / 'regularizers/Tiny', ignore_errors=True)\nshutil.copytree(logdir / 'sizes/Tiny', logdir / 'regularizers/Tiny')\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Dense",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"tensorflow.keras.utils.get_file",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xscale",
"tensorflow.stack",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.BinaryCrossentropy",
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.schedules.InverseTimeDecay",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.utils.get_file",
"matplotlib.pyplot.xlim",
"tensorflow.keras.callbacks.TensorBoard",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
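The TensorFlow tutorial code in the row above drives its Adam optimizer with an inverse-time learning-rate decay. A minimal sketch of that schedule in isolation, assuming TensorFlow 2.x; the `decay_steps` value here is a hypothetical stand-in for the tutorial's `STEPS_PER_EPOCH * 1000`:

```python
import tensorflow as tf

# lr(step) = initial_lr / (1 + decay_rate * step / decay_steps)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.001,
    decay_steps=20 * 1000,  # hypothetical; the row uses STEPS_PER_EPOCH * 1000
    decay_rate=1,
    staircase=False,
)

for step in (0, 10_000, 100_000):
    # the schedule object is callable on a step index
    print(step, float(lr_schedule(step)))
```

With `decay_rate=1`, the rate has halved by `step == decay_steps`, giving the slow 1/x decay the row's code plots against epochs.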
M00mo/neuralpredictors | [
"0dd46f0bf03ec3fe53f6a796cbcbea09c4972932"
] | [
"neuralpredictors/data/datasets.py"
] | [
"import json\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom pathlib import Path\nfrom zipfile import ZipFile\n\nimport h5py\nimport numpy as np\nfrom scipy.signal import convolve2d\nfrom torch.utils.data import Dataset\n\nfrom .exceptions import InconsistentDataException, DoesNotExistException\nfrom .transforms import (\n DataTransform,\n MovieTransform,\n StaticTransform,\n Invertible,\n Subsequence,\n Delay,\n)\nfrom .utils import convert_static_h5_dataset_to_folder, zip_dir\nfrom ..utils import recursively_load_dict_contents_from_group\n\n\nclass AttributeHandler:\n def __init__(self, name, h5_handle):\n \"\"\"\n Can be used to turn a dataset within a hdf5 dataset into an attribute.\n\n Args:\n name: name of the dataset in the hdf5 file\n h5_handle: file handle for the hdf5 file\n \"\"\"\n assert name in h5_handle, \"{} must be in {}\".format(name, h5_handle)\n self.name = name\n self.h5_handle = h5_handle\n\n def __getattr__(self, item):\n if item in self.h5_handle[self.name]:\n ret = self.h5_handle[self.name][item][()]\n if ret.dtype.char == \"S\": # convert bytes to unicode\n ret = ret.astype(str)\n return ret\n else:\n raise AttributeError(\"Attribute {} not found\".format(item))\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def keys(self):\n return self.h5_handle[self.name].keys()\n\n def __dir__(self):\n attrs = set(super().__dir__())\n return attrs.union(set(self.h5_handle[self.name].keys()))\n\n\nclass AttributeTransformer(AttributeHandler):\n def __init__(self, name, h5_handle, transforms, data_group):\n \"\"\"\n Allows for id_transform of transforms to be applied to the\n specified attribute. Otherwise behaves like an AttributeHandler\n\n Args:\n name: see AttributeHandler\n h5_handle: see AttributeHandler\n transforms: the set of transforms that's supposed to be applied\n data_group: the data_key of the dataset that this attribute represents\n \"\"\"\n super().__init__(name, h5_handle)\n self.transforms = transforms\n self.data_group = data_group\n\n def __getattr__(self, item):\n ret = {self.data_group: super().__getattr__(item)}\n for tr in self.transforms:\n if hasattr(tr, \"id_transform\"):\n ret = tr.id_transform(ret)\n\n return ret[self.data_group]\n\n\nclass TransformDataset(Dataset):\n def __init__(self, transforms=None):\n \"\"\"\n Abstract Class for Datasets with transformations, providing `transform` and `invert` functions\n to apply data transformation on the elements.\n\n Args:\n transforms: list of transforms to be applied to each data point\n \"\"\"\n self.transforms = transforms or []\n\n def transform(self, x, exclude=None):\n \"\"\"\n Apply transform on a data element from the dataset\n\n Args:\n x (tuple): a data element from the dataset\n exclude (Transform, optional): Type of data transformer to be excluded from transform list. 
Defaults to None.\n\n Returns:\n tuple: transformed data element\n \"\"\"\n\n for tr in self.transforms:\n if exclude is None or not isinstance(tr, exclude):\n x = tr(x)\n return x\n\n def invert(self, x, exclude=None):\n for tr in reversed(\n filter(lambda tr: not isinstance(tr, exclude), self.transforms)\n ):\n if not isinstance(tr, Invertible):\n raise TypeError(\"Cannot invert\", tr.__class__.__name__)\n else:\n x = tr.inv(x)\n return x\n\n def __iter__(self):\n yield from map(self.__getitem__, range(len(self)))\n\n def __repr__(self):\n return (\n \"{} m={}:\\n\\t({})\".format(\n self.__class__.__name__, len(self), \", \".join(self.data_groups)\n )\n + \"\\n\\t[Transforms: \"\n + \"->\".join([repr(tr) for tr in self.transforms])\n + \"]\"\n )\n\n\nclass H5SequenceSet(TransformDataset):\n def __init__(\n self,\n filename,\n *data_keys,\n output_rename=None,\n transforms=None,\n output_dict=False\n ):\n super().__init__(transforms=transforms)\n\n self.output_dict = output_dict\n\n if output_rename is None:\n output_rename = {}\n\n # a flag that can be changed to turn renaming on/off\n self.rename_output = True\n\n self.output_rename = output_rename\n\n self._fid = h5py.File(filename, \"r\")\n self.data = self._fid\n self.data_loaded = False\n\n # ensure that all elements of\n m = None\n for key in data_keys:\n assert key in self.data, \"Could not find {} in file\".format(key)\n l = len(self.data[key])\n if m is not None and l != m:\n raise ValueError(\"groups have different length\")\n m = l\n self._len = m\n\n # Specify which types of transforms are accepted\n self._transform_set = DataTransform\n\n self.data_keys = data_keys\n self.transforms = transforms or []\n\n self.data_point = namedtuple(\"DataPoint\", data_keys)\n self.output_point = namedtuple(\n \"OutputPoint\", [output_rename.get(k, k) for k in data_keys]\n )\n\n def load_content(self):\n self.data = recursively_load_dict_contents_from_group(self._fid)\n self.data_loaded = True\n\n def unload_content(self):\n self.data = self._fid\n self.data_loaded = False\n\n def __len__(self):\n return self._len\n\n def __getitem__(self, item):\n x = self.data_point(\n *(\n np.array(self.data[g][item if self.data_loaded else str(item)])\n for g in self.data_keys\n )\n )\n for tr in self.transforms:\n assert isinstance(tr, self._transform_set)\n x = tr(x)\n\n # convert to output point\n if self.rename_output:\n x = self.output_point(*x)\n\n if self.output_dict:\n x = x._asdict\n return x\n\n def __getattr__(self, item):\n if item in self.data:\n item = self.data[item]\n if isinstance(item, h5py.Dataset):\n dtype = item.dtype\n item = item[()]\n if dtype.char == \"S\": # convert bytes to unicode\n item = item.astype(str)\n return item\n return item\n else:\n # TODO: check for a proper way to handle cases where super doesn't have __getattr__\n return super().__getattr__(item)\n\n def __repr__(self):\n names = [\n \"{} -> {}\".format(k, self.output_rename[k])\n if k in self.output_rename\n else k\n for k in self.data_keys\n ]\n s = \"{} m={}:\\n\\t({})\".format(\n self.__class__.__name__, len(self), \", \".join(names)\n )\n if self.transforms is not None:\n s += (\n \"\\n\\t[Transforms: \"\n + \"->\".join([repr(tr) for tr in self.transforms])\n + \"]\"\n )\n return s\n\n\nclass MovieSet(H5SequenceSet):\n \"\"\"\n Extension to H5SequenceSet with specific HDF5 dataset assumed. 
Specifically,\n it assumes that properties such as `neurons` and `stats` are present in the dataset.\n \"\"\"\n\n def __init__(\n self,\n filename,\n *data_groups,\n output_rename=None,\n transforms=None,\n stats_source=\"all\"\n ):\n super().__init__(\n filename, *data_groups, output_rename=output_rename, transforms=transforms\n )\n self.stats_source = stats_source\n\n # set to accept only MovieTransform\n self._transform_set = MovieTransform\n\n @property\n def neurons(self):\n return AttributeTransformer(\n \"neurons\", self.data, self.transforms, data_group=\"responses\"\n )\n\n @property\n def n_neurons(self):\n return len(self.neurons.unit_ids)\n\n @property\n def input_shape(self):\n name = (\n self.output_rename.get(\"inputs\", \"inputs\")\n if self.rename_output\n else \"inputs\"\n )\n return (1,) + getattr(self[0], name).shape\n\n def transformed_mean(self, stats_source=None):\n if stats_source is None:\n stats_source = self.stats_source\n\n tmp = [\n np.atleast_1d(self.statistics[g][stats_source][\"mean\"][()])\n for g in self.data_keys\n ]\n x = self.transform(self.data_point(*tmp), exclude=(Subsequence, Delay))\n if self.rename_output:\n x = self.output_point(*x)\n return x\n\n def rf_base(self, stats_source=\"all\"):\n N, c, t, w, h = self.img_shape\n t = min(t, 150)\n mean = lambda dk: self.statistics[dk][stats_source][\"mean\"][()]\n d = dict(\n inputs=np.ones((1, c, t, w, h)) * np.array(mean(\"inputs\")),\n eye_position=np.ones((1, t, 1)) * mean(\"eye_position\")[None, None, :],\n behavior=np.ones((1, t, 1)) * mean(\"behavior\")[None, None, :],\n responses=np.ones((1, t, 1)) * mean(\"responses\")[None, None, :],\n )\n return self.transform(\n self.data_point(*[d[dk] for dk in self.data_keys]), exclude=Subsequence\n )\n\n def rf_noise_stim(self, m, t, stats_source=\"all\"):\n \"\"\"\n Generates a Gaussian white noise stimulus filtered with a 3x3 Gaussian filter\n for the computation of receptive fields. The mean and variance of the Gaussian\n noise are set to the mean and variance of the stimulus ensemble.\n\n The behvavior, eye movement statistics, and responses are set to their respective means.\n Args:\n m: number of noise samples\n t: length in time\n\n Returns: tuple of input, behavior, eye, and response\n\n \"\"\"\n N, c, _, w, h = self.img_shape\n stat = lambda dk, what: self.statistics[dk][stats_source][what][()]\n mu, s = stat(\"inputs\", \"mean\"), stat(\"inputs\", \"std\")\n h_filt = np.float64(\n [[1 / 16, 1 / 8, 1 / 16], [1 / 8, 1 / 4, 1 / 8], [1 / 16, 1 / 8, 1 / 16]]\n )\n noise_input = (\n np.stack(\n [\n convolve2d(np.random.randn(w, h), h_filt, mode=\"same\")\n for _ in range(m * t * c)\n ]\n ).reshape((m, c, t, w, h))\n * s\n + mu\n )\n\n mean_beh = np.ones((m, t, 1)) * stat(\"behavior\", \"mean\")[None, None, :]\n mean_eye = np.ones((m, t, 1)) * stat(\"eye_position\", \"mean\")[None, None, :]\n mean_resp = np.ones((m, t, 1)) * stat(\"responses\", \"mean\")[None, None, :]\n\n d = dict(\n inputs=noise_input.astype(np.float32),\n eye_position=mean_eye.astype(np.float32),\n behavior=mean_beh.astype(np.float32),\n responses=mean_resp.astype(np.float32),\n )\n\n return self.transform(\n self.data_point(*[d[dk] for dk in self.data_groups.values()]),\n exclude=(Subsequence, Delay),\n )\n\n\ndefault_datapoint = namedtuple(\"DefaultDataPoint\", [\"images\", \"responses\"])\n\n\nclass StaticSet(TransformDataset):\n def __init__(self, *data_keys, transforms=None):\n \"\"\"\n Abstract class for static datasets. 
Defines data_keys and a corresponding datapoint.\n \"\"\"\n super().__init__(transforms=transforms)\n\n self.data_keys = data_keys\n if set(data_keys) == {\"images\", \"responses\"}:\n # this version IS serializable in pickle\n self.data_point = default_datapoint\n else:\n # this version is NOT - you cannot use this with a dataloader with num_workers > 1\n self.data_point = namedtuple(\"DataPoint\", data_keys)\n\n\nclass H5ArraySet(StaticSet):\n def __init__(self, filename, *data_keys, transforms=None):\n \"\"\"\n Dataset for static data stored in hdf5 files.\n\n Args:\n filename: filename of the hdf5 file\n *data_keys: data keys to be read from the file\n transforms: list of transforms applied to each datapoint\n \"\"\"\n super().__init__(*data_keys, transforms=transforms)\n\n self._fid = h5py.File(filename, \"r\")\n self.data = self._fid\n self.data_loaded = False\n m = None\n for key in data_keys:\n assert key in self.data, \"Could not find {} in file\".format(key)\n if m is None:\n m = len(self.data[key])\n else:\n assert m == len(self.data[key]), \"Length of datasets do not match\"\n self._len = m\n\n def load_content(self):\n self.data = recursively_load_dict_contents_from_group(self._fid)\n self.data_loaded = True\n\n def unload_content(self):\n self.data = self._fid\n self.data_loaded = False\n\n def __getitem__(self, item):\n x = self.data_point(*(self.data[g][item] for g in self.data_keys))\n for tr in self.transforms:\n assert isinstance(tr, StaticTransform)\n x = tr(x)\n return x\n\n def __iter__(self):\n yield from map(self.__getitem__, range(len(self)))\n\n def __len__(self):\n return self._len\n\n def __repr__(self):\n return \"\\n\".join(\n [\n \"Tensor {}: {} \".format(key, self.data[key].shape)\n for key in self.data_keys\n ]\n + [\"Transforms: \" + repr(self.transforms)]\n )\n\n def __getattr__(self, item):\n if item in self.data:\n item = self.data[item]\n if isinstance(item, h5py.Dataset):\n dtype = item.dtype\n item = item[()]\n if dtype.char == \"S\": # convert bytes to univcode\n item = item.astype(str)\n return item\n return item\n else:\n raise AttributeError(\n \"Item {} not found in {}\".format(item, self.__class__.__name__)\n )\n\n\nclass StaticImageSet(H5ArraySet):\n def __init__(\n self, filename, *data_keys, transforms=None, cache_raw=False, stats_source=None\n ):\n \"\"\"\n Dataset for h5 files.\n\n Args:\n filename: filename of the hdf5 file\n *data_keys: datasets to be extracted\n transforms: transforms applied to each data point\n cache_raw: whether to cache the raw (untransformed) datapoints\n stats_source: statistic source to be used.\n \"\"\"\n super().__init__(filename, *data_keys, transforms=transforms)\n self.cache_raw = cache_raw\n self.last_raw = None\n self.stats_source = stats_source if stats_source is not None else \"all\"\n\n @property\n def n_neurons(self):\n return len(self[0].responses)\n\n @property\n def neurons(self):\n return AttributeTransformer(\n \"neurons\", self.data, self.transforms, data_group=\"responses\"\n )\n\n @property\n def info(self):\n return AttributeHandler(\"item_info\", self.data)\n\n @property\n def img_shape(self):\n return (1,) + self[0].images.shape\n\n def transformed_mean(self, stats_source=None):\n if stats_source is None:\n stats_source = self.stats_source\n\n tmp = [\n np.atleast_1d(self.statistics[dk][stats_source][\"mean\"][()])\n for dk in self.data_keys\n ]\n return self.transform(self.data_point(*tmp))\n\n def __repr__(self):\n return super().__repr__() + (\n \"\\n\\t[Stats source: 
{}]\".format(self.stats_source)\n if self.stats_source is not None\n else \"\"\n )\n\n def __dir__(self):\n attrs = set(self.__dict__).union(set(dir(type(self))))\n return attrs.union(set(self.data.keys()))\n\n\nclass DirectoryAttributeHandler:\n def __init__(self, path, links=None):\n \"\"\"\n Class that can be used to represent a subdirectory of a FileTree as a property in a FileTree dataset.\n Caches already loaded data items.\n\n Args:\n path: path to the subdiretory (pathlib.Path object)\n \"\"\"\n self.links = links or {}\n self.path = path\n\n def __getattr__(self, item):\n temp_path = self.resolve_item_path(item)\n if temp_path.exists() and temp_path.is_dir():\n val = DirectoryAttributeHandler(temp_path, links=self.links)\n else:\n val = np.load(self.path / \"{}.npy\".format(item))\n return val\n\n def resolve_item_path(self, item):\n if item in self.links:\n item = self.links[item]\n return self.path / item\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def keys(self):\n return [e.stem for e in self.path.glob(\"*\")]\n\n def __dir__(self):\n attrs = set(super().__dir__())\n return attrs.union(set(self.keys())).union(set(self.links.keys()))\n\n\nclass DirectoryAttributeTransformer(DirectoryAttributeHandler):\n def __init__(self, path, transforms, data_group, links=None):\n \"\"\"\n Class that can be used to represent a subdirectory of a FileTree as a property in a FileTree dataset.\n Like DirectoryAttributeHandler but allows for id_transform of transforms to be applied to the\n specified attribute.\n\n Args:\n path: path to the subdiretory (pathlib.Path object)\n \"\"\"\n\n super().__init__(path, links=links)\n self.transforms = transforms\n self.data_group = data_group\n\n def __getattr__(self, item):\n ret = {self.data_group: super().__getattr__(item)}\n for tr in self.transforms:\n ret = tr.id_transform(ret)\n return ret[self.data_group]\n\n\nclass FileTreeDataset(StaticSet):\n def __init__(self, dirname, *data_keys, transforms=None):\n \"\"\"\n Dataset stored as a file tree. The tree needs to have the subdirs data, meta, meta/neurons, meta/statistics,\n and meta/trials. Please refer to convert_static_h5_dataset_to_folder in neuralpredictors.data.utils\n how to export an hdf5 file into that structure.\n\n\n Here is an example. 
Data directories with too many entries have trials as .npy files\n named 0.npy, 1.npy, ...\n The meta/trials subdirectory must have single .npy files with arrays that provide additional trial based\n meta data.\n\n static22564-2-13-preproc0\n ├── data\n │ ├── behavior [5955 entries exceeds filelimit, not opening dir]\n │ ├── images [5955 entries exceeds filelimit, not opening dir]\n │ ├── pupil_center [5955 entries exceeds filelimit, not opening dir]\n │ └── responses [5955 entries exceeds filelimit, not opening dir]\n └── meta\n ├── neurons\n │ ├── animal_ids.npy\n │ ├── area.npy\n │ ├── layer.npy\n │ ├── scan_idx.npy\n │ ├── sessions.npy\n │ └── unit_ids.npy\n ├── statistics\n │ ├── behavior\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ ├── images\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ ├── pupil_center\n │ │ ├── all\n │ │ │ ├── max.npy\n │ │ │ ├── mean.npy\n │ │ │ ├── median.npy\n │ │ │ ├── min.npy\n │ │ │ └── std.npy\n │ │ └── stimulus_frame\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ └── responses\n │ ├── all\n │ │ ├── max.npy\n │ │ ├── mean.npy\n │ │ ├── median.npy\n │ │ ├── min.npy\n │ │ └── std.npy\n │ └── stimulus_frame\n │ ├── max.npy\n │ ├── mean.npy\n │ ├── median.npy\n │ ├── min.npy\n │ └── std.npy\n └── trials [12 entries exceeds filelimit, not opening dir]\n\n Args:\n dirname: root directory name\n *data_keys: data items to be extraced (must be subdirectories of root/data)\n transforms: transforms to be applied to the data (see TransformDataset)\n \"\"\"\n super().__init__(*data_keys, transforms=transforms)\n\n number_of_files = []\n\n if dirname.endswith(\".zip\"):\n if not Path(dirname[:-4]).exists():\n self.unzip(dirname, Path(dirname).absolute().parent)\n else:\n print(\n \"{} exists already. 
Not unpacking {}\".format(dirname[:-4], dirname)\n )\n\n dirname = dirname[:-4]\n\n self.basepath = Path(dirname).absolute()\n self._config_file = self.basepath / \"config.json\"\n\n if not self._config_file.exists():\n self._save_config(self._default_config)\n\n\n for data_key in data_keys:\n if data_key not in self.trial_info.keys():\n datapath = self.resolve_data_path(data_key)\n number_of_files.append(len(list(datapath.glob(\"*\"))))\n else:\n number_of_files.append(len(self.trial_info[data_key]))\n\n if not np.all(np.diff(number_of_files) == 0):\n raise InconsistentDataException(\"Number of data points is not equal\")\n else:\n self._len = number_of_files[0]\n\n self._cache = {data_key: {} for data_key in data_keys}\n\n _default_config = {\"links\": {}}\n\n def resolve_data_path(self, data_key):\n if self.link_exists(data_key):\n data_key = self.config[\"links\"][data_key]\n datapath = self.basepath / \"data\" / data_key\n\n if not datapath.exists():\n raise DoesNotExistException(\"Data path {} does not exist\".format(datapath))\n return datapath\n\n def link_exists(self, link):\n return \"links\" in self.config and link in self.config[\"links\"]\n\n @property\n def config(self):\n with open(self._config_file) as fid:\n return json.load(fid)\n\n def _save_config(self, cfg):\n with open(self._config_file, \"w\") as fid:\n return json.dump(cfg, fid)\n\n def __len__(self):\n return self._len\n\n def __getitem__(self, item):\n # load data from cache or disk\n ret = []\n for data_key in self.data_keys:\n if item in self._cache[data_key]:\n ret.append(self._cache[data_key][item])\n else:\n if data_key in self.trial_info.keys():\n val = self.trial_info[data_key][item:item+1]\n else:\n datapath = self.resolve_data_path(data_key)\n val = np.load(datapath / \"{}.npy\".format(item))\n self._cache[data_key][item] = val\n ret.append(val)\n\n # create data point and transform\n x = self.data_point(*ret)\n for tr in self.transforms:\n assert isinstance(tr, StaticTransform)\n x = tr(x)\n return x\n\n def add_log_entry(self, msg):\n timestamp = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n with open(self.basepath / \"change.log\", \"a+\") as fid:\n fid.write(\"{}: {}\\n\".format(timestamp, msg))\n\n @staticmethod\n def match_order(target, permuted, not_exist_ok=False):\n \"\"\"\n Matches the order or rows in permuted to by returning an index array such that.\n\n Args:\n not_exist_ok: if the element does not exist, don't return an index\n\n Returns: index array `idx` such that `target == permuted[idx, :]`\n \"\"\"\n\n order, target_idx = [], []\n unmatched_counter = 0\n for i, row in enumerate(target):\n idx = np.sum(permuted - row, axis=1) == 0\n if not not_exist_ok:\n assert idx.sum() == 1\n if idx.sum() == 1:\n order.append(np.where(idx)[0][0])\n target_idx.append(i)\n else:\n unmatched_counter += 1\n if not_exist_ok:\n print(\"Encountered {} unmatched elements\".format(unmatched_counter))\n return np.array(target_idx, dtype=int), np.array(order, dtype=int)\n\n def add_neuron_meta(\n self, name, animal_id, session, scan_idx, unit_id, values, fill_missing=None\n ):\n \"\"\"\n Add new meta information about neurons.\n\n Args:\n name: name of the new meta information\n animal_id: array with animal_ids per first dimension of values\n session: array with session per first dimension of values\n scan_idx: array with scan_idx per first dimension of values\n unit_id: array with unit_id per first dimension of values\n values: new meta information. 
First dimension must refer to neurons.\n fill_missing: fill the values of the new attribute with NaN if not provided\n \"\"\"\n if (\n not len(animal_id)\n == len(session)\n == len(scan_idx)\n == len(unit_id)\n == len(values)\n ):\n raise InconsistentDataException(\n \"number of trials and identifiers not consistent\"\n )\n\n target = np.c_[\n (\n self.neurons.animal_ids,\n self.neurons.sessions,\n self.neurons.scan_idx,\n self.neurons.unit_ids,\n )\n ]\n permuted = np.c_[(animal_id, session, scan_idx, unit_id)]\n vals = np.ones((len(target),) + values.shape[1:], dtype=values.dtype) * (\n np.nan if fill_missing is None else fill_missing\n )\n tidx, idx = self.match_order(\n target, permuted, not_exist_ok=fill_missing is not None\n )\n\n assert (\n np.sum(target[tidx] - permuted[idx, ...]) == 0\n ), \"Something went wrong in sorting\"\n\n vals[tidx, ...] = values[idx, ...]\n np.save(self.basepath / \"meta/neurons/{}.npy\".format(name), vals)\n self.add_log_entry(\n \"Added new neuron meta attribute {} to meta/neurons\".format(name)\n )\n\n @staticmethod\n def initialize_from(filename, outpath=None, overwrite=False, ignore_all_behaviors=False):\n \"\"\"\n Convenience function. See `convert_static_h5_dataset_to_folder` in `.utils`\n \"\"\"\n convert_static_h5_dataset_to_folder(\n filename, outpath=outpath, overwrite=overwrite, ignore_all_behaviors=ignore_all_behaviors\n )\n\n @property\n def change_log(self):\n if (self.basepath / \"change.log\").exists():\n with open(self.basepath / \"change.log\", \"r\") as fid:\n print(\"\".join(fid.readlines()))\n\n def zip(self, filename=None):\n \"\"\"\n Zips current dataset.\n\n Args:\n filename: Filename for the zip. Directory name + zip by default.\n \"\"\"\n\n if filename is None:\n filename = str(self.basepath) + \".zip\"\n zip_dir(filename, self.basepath)\n\n def unzip(self, filename, path):\n print(\"Unzipping {} into {}\".format(filename, path))\n with ZipFile(filename, \"r\") as zip_obj:\n zip_obj.extractall(path)\n\n def add_link(self, attr, new_name):\n \"\"\"\n Add a new dataset that links to an existing dataset.\n\n For instance `targets` that links to `responses`\n\n Args:\n attr: existing attribute such as `responses`\n new_name: name of the new attribute reference.\n \"\"\"\n if not (self.basepath / \"data/{}\".format(attr)).exists():\n raise DoesNotExistException(\"Link target does not exist\")\n\n if (self.basepath / \"data/{}\".format(new_name)).exists():\n raise FileExistsError(\"Link target already exists\")\n\n config = self.config\n if not \"links\" in config:\n config[\"links\"] = {}\n config[\"links\"][new_name] = attr\n self._save_config(config)\n\n @property\n def n_neurons(self):\n return len(self[0].responses)\n\n @property\n def neurons(self):\n return DirectoryAttributeTransformer(\n self.basepath / \"meta/neurons\",\n self.transforms,\n data_group=\"responses\" if \"responses\" in self.data_keys else \"targets\",\n )\n\n @property\n def trial_info(self):\n return DirectoryAttributeHandler(self.basepath / \"meta/trials\")\n\n @property\n def statistics(self):\n return DirectoryAttributeHandler(\n self.basepath / \"meta/statistics\", self.config[\"links\"]\n )\n\n @property\n def img_shape(self):\n return (1,) + self[0].images.shape\n\n def __repr__(self):\n return \"{} {} (n={} items)\\n\\t{}\".format(\n self.__class__.__name__, self.basepath, self._len, \", \".join(self.data_keys)\n )\n"
] | [
[
"numpy.ones",
"numpy.atleast_1d",
"numpy.diff",
"numpy.float64",
"numpy.random.randn",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
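`FileTreeDataset.match_order` in the row above aligns neuron metadata by finding, for each row of `target`, the matching row of `permuted`. A minimal pure-NumPy sketch of that matching idea on toy data (not the library's API):

```python
import numpy as np

target = np.array([[1, 2], [3, 4], [5, 6]])
permuted = np.array([[5, 6], [1, 2], [3, 4]])

# for each target row, locate the permuted row that differs by zero everywhere
idx = np.array([np.where((permuted == row).all(axis=1))[0][0] for row in target])

assert np.array_equal(target, permuted[idx])  # idx == [1, 2, 0]
```

The library version additionally tolerates unmatched rows (`not_exist_ok`) and returns the paired target indices, so `add_neuron_meta` can fill missing entries with NaN.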
iserh/data-augmentation | [
"1e1e99177ff4256c68cafe043bd7e50d52bf669d"
] | [
"src/vae/models/architectures/model_v3.py"
] | [
"\"\"\"Variational autoencoder module class.\"\"\"\nfrom typing import Tuple\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom utils import init_weights\nfrom vae.models.base import Decoder, Encoder, VAEConfig, VAEModel\n\n\nclass _Encoder(Encoder):\n def __init__(self, z_dim: int, num_features: int) -> None:\n super(_Encoder, self).__init__()\n self.linear_stage = nn.Sequential(\n nn.Linear(num_features, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, 256),\n nn.ReLU(inplace=True),\n nn.Linear(256, 128),\n nn.ReLU(inplace=True),\n nn.Linear(128, 64),\n nn.ReLU(inplace=True),\n )\n # Encoder mean\n self.mean = nn.Linear(64, z_dim)\n # Encoder Variance log\n self.variance_log = nn.Linear(64, z_dim)\n\n # initialize weights\n self.linear_stage.apply(init_weights)\n self.mean.apply(init_weights)\n self.variance_log.apply(init_weights)\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n x = self.linear_stage(x)\n return self.mean(x), self.variance_log(x)\n\n\nclass _Decoder(Decoder):\n def __init__(self, z_dim: int, num_features: int) -> None:\n super(_Decoder, self).__init__()\n self.linear_stage = nn.Sequential(\n nn.Linear(z_dim, 64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(64, 128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(128, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, num_features),\n nn.Sigmoid(),\n )\n\n # initialize weights\n self.linear_stage.apply(init_weights)\n\n def forward(self, x: Tensor) -> Tensor:\n return self.linear_stage(x)\n\n\nclass VAEModelV3(VAEModel):\n def __init__(self, config: VAEConfig) -> None:\n super().__init__(config)\n self.encoder = _Encoder(config.z_dim, num_features=8)\n self.decoder = _Decoder(config.z_dim, num_features=8)\n\n\ndef _get_model_constructor() -> VAEModelV3:\n return VAEModelV3\n"
] | [
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
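The `_Encoder` in the row above returns a `(mean, variance_log)` pair, but the sampling step lives elsewhere in the package. A minimal sketch, assuming PyTorch, of the standard reparameterization that would consume that pair; the standalone function is illustrative, not code from the repo:

```python
import torch

def reparameterize(mean: torch.Tensor, variance_log: torch.Tensor) -> torch.Tensor:
    std = torch.exp(0.5 * variance_log)  # log-variance -> standard deviation
    eps = torch.randn_like(std)          # unit Gaussian noise, same shape as std
    return mean + eps * std              # differentiable sample z ~ N(mean, std**2)

mean = torch.zeros(4, 2)          # batch of 4, z_dim = 2
variance_log = torch.zeros(4, 2)  # log-variance 0 -> variance 1
z = reparameterize(mean, variance_log)
print(z.shape)  # torch.Size([4, 2])
```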
joshp112358/Cirq | [
"c4fac27a9849e589ee05b4f702f2d7c9049aaeea",
"c4fac27a9849e589ee05b4f702f2d7c9049aaeea",
"c4fac27a9849e589ee05b4f702f2d7c9049aaeea"
] | [
"cirq/ops/controlled_operation.py",
"cirq/protocols/commutes_protocol.py",
"cirq/ops/eigen_gate_test.py"
] | [
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n cast,\n Any,\n Collection,\n Optional,\n Sequence,\n Tuple,\n Union,\n TYPE_CHECKING,\n)\n\nimport itertools\nimport numpy as np\n\nfrom cirq import protocols, linalg, value\nfrom cirq.ops import raw_types, gate_operation, controlled_gate\nfrom cirq.type_workarounds import NotImplementedType\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to have one or more control qubits.\n\n This object is typically created via `operation.controlled_by(*qubits)`.\n \"\"\"\n\n def __init__(self,\n controls: Sequence[raw_types.Qid],\n sub_operation: 'cirq.Operation',\n control_values: Optional[Sequence[\n Union[int, Collection[int]]]] = None):\n if control_values is None:\n control_values = ((1,),) * len(controls)\n if len(control_values) != len(controls):\n raise ValueError('len(control_values) != len(controls)')\n # Convert to sorted tuples\n self.control_values = cast(\n Tuple[Tuple[int, ...], ...],\n tuple((val,) if isinstance(val, int) else tuple(sorted(val))\n for val in control_values))\n # Verify control values not out of bounds\n for q, val in zip(controls, self.control_values):\n if not all(0 <= v < q.dimension for v in val):\n raise ValueError(\n 'Control values <{!r}> outside of range for qubit '\n '<{!r}>.'.format(val, q))\n\n if not isinstance(sub_operation, ControlledOperation):\n self.controls = tuple(controls)\n self.sub_operation = sub_operation\n else:\n # Auto-flatten nested controlled operations.\n self.controls = tuple(controls) + sub_operation.controls\n self.sub_operation = sub_operation.sub_operation\n self.control_values += sub_operation.control_values\n\n @property\n def gate(self) -> Optional['cirq.ControlledGate']:\n if self.sub_operation.gate is None:\n return None\n return controlled_gate.ControlledGate(\n self.sub_operation.gate,\n control_values=self.control_values,\n control_qid_shape=[q.dimension for q in self.controls])\n\n @property\n def qubits(self):\n return self.controls + self.sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n n = len(self.controls)\n return ControlledOperation(\n new_qubits[:n], self.sub_operation.with_qubits(*new_qubits[n:]),\n self.control_values)\n\n def _decompose_(self):\n result = protocols.decompose_once(self.sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [\n ControlledOperation(self.controls, op, self.control_values)\n for op in result\n ]\n\n def _value_equality_values_(self):\n return (frozenset(zip(self.controls,\n self.control_values)), self.sub_operation)\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n n = len(self.controls)\n sub_n = len(args.axes) - n\n sub_axes = args.axes[n:]\n for control_vals in itertools.product(*self.control_values):\n active = (..., *(slice(v, v + 1) for v in control_vals),\n *(slice(None),) * sub_n)\n 
target_view = args.target_tensor[active]\n buffer_view = args.available_buffer[active]\n result = protocols.apply_unitary(self.sub_operation,\n protocols.ApplyUnitaryArgs(\n target_view, buffer_view,\n sub_axes),\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is not target_view:\n # HACK: assume they didn't somehow escape the slice view and\n # edit the rest of target_tensor.\n target_view[...] = result\n\n return args.target_tensor\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_operation)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_operation, None)\n if sub_matrix is None:\n return NotImplemented\n qid_shape = protocols.qid_shape(self)\n sub_n = len(qid_shape) - len(self.controls)\n tensor = linalg.eye_tensor(qid_shape, dtype=sub_matrix.dtype)\n sub_tensor = sub_matrix.reshape(qid_shape[len(self.controls):] * 2)\n for control_vals in itertools.product(*self.control_values):\n active = (*(v for v in control_vals), *(slice(None),) * sub_n) * 2\n tensor[active] = sub_tensor\n return tensor.reshape((np.prod(qid_shape, dtype=int),) * 2)\n\n def __str__(self):\n if set(self.control_values) == {(1,)}:\n\n def get_prefix(control_vals):\n return 'C'\n else:\n\n def get_prefix(control_vals):\n return 'C{}'.format(''.join(map(str, sorted(control_vals))))\n\n prefix = ''.join(map(get_prefix, self.control_values))\n if isinstance(self.sub_operation, gate_operation.GateOperation):\n return '{}{}({})'.format(prefix, self.sub_operation.gate,\n ', '.join(map(str, self.qubits)))\n return '{}({}, {})'.format(prefix,\n ', '.join(str(q) for q in self.controls),\n str(self.sub_operation))\n\n def __repr__(self):\n return ('cirq.ControlledOperation(controls={!r}, sub_operation={!r}, '\n 'control_values={!r})'.format(self.controls, self.sub_operation,\n self.control_values))\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.sub_operation)\n\n def _resolve_parameters_(self, resolver):\n new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)\n return ControlledOperation(self.controls, new_sub_op,\n self.control_values)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n u = protocols.unitary(self.sub_operation, default=None)\n if u is None:\n return NotImplemented\n angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)\n return protocols.trace_distance_from_angle_list(angle_list)\n\n def __pow__(self, exponent: Any) -> 'ControlledOperation':\n new_sub_op = protocols.pow(self.sub_operation,\n exponent,\n NotImplemented)\n if new_sub_op is NotImplemented:\n return NotImplemented\n return ControlledOperation(self.controls, new_sub_op,\n self.control_values)\n\n def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n n = len(self.controls)\n\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=(args.known_qubit_count - n\n if args.known_qubit_count is not None else None),\n known_qubits=(args.known_qubits[n:]\n if args.known_qubits is not None else None),\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n qubit_map=args.qubit_map)\n sub_info = protocols.circuit_diagram_info(self.sub_operation,\n sub_args,\n None)\n if sub_info is None:\n return NotImplemented\n\n def get_symbol(vals):\n if tuple(vals) == (1,):\n return '@'\n return '({})'.format(','.join(map(str, 
vals)))\n\n wire_symbols = (*(get_symbol(vals) for vals in self.control_values),\n *sub_info.wire_symbols)\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=None if sub_info.exponent_qubit_index is None\n else sub_info.exponent_qubit_index + 1)\n\n def _json_dict_(self):\n return {\n 'cirq_type': self.__class__.__name__,\n 'controls': self.controls,\n 'control_values': self.control_values,\n 'sub_operation': self.sub_operation,\n }\n",
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Protocol for determining commutativity.\"\"\"\n\nfrom typing import Any, TypeVar, Union\n\nimport numpy as np\n\nfrom typing_extensions import Protocol\n\nfrom cirq import linalg, ops\nfrom cirq._doc import document\nfrom cirq.protocols import qid_shape_protocol, unitary_protocol\nfrom cirq.type_workarounds import NotImplementedType\n\n# This is a special indicator value used by the unitary method to determine\n# whether or not the caller provided a 'default' argument.\n# It is checked for using `is`, so it won't have a false positive if the user\n# provides a different np.array([]) value.\nRaiseTypeErrorIfNotProvided = np.array([])\n\nTDefault = TypeVar('TDefault')\n\n\nclass SupportsCommutes(Protocol):\n \"\"\"An object that can determine commutation relationships vs others.\"\"\"\n\n @document\n def _commutes_(self, other: Any,\n atol: float) -> Union[None, bool, NotImplementedType]:\n r\"\"\"Determines if this object commutes with the other object.\n\n Can return None to indicate the commutation relationship is\n indeterminate (e.g. incompatible matrix sizes). Can return\n NotImplemented to indicate to the caller that they should try some other\n way of determining the commutation relationship.\n\n Args:\n other: The other object that may or may not commute with the\n receiving object.\n atol: Absolute error tolerance. Some objects that commute may appear\n to not quite commute, due to rounding error from floating point\n computations. This parameter indicates an acceptable level of\n deviation from exact commutativity. The exact meaning of what\n error is being tolerated is not specified. It could be the\n maximum angle between rotation axes in the Bloch sphere, or the\n maximum trace of the absolute value of the commutator, or\n some other value convenient to the implementor of the method.\n\n Returns:\n Whether or not the values commute.\n\n True: `self` commutes with `other` within absolute tolerance `atol`.\n\n False: `self` does not commute with `other`.\n\n None: There is not a well defined commutation result. For example,\n whether or not parameterized operations will commute may depend\n on the parameter values and so is indeterminate.\n\n NotImplemented: Unable to determine anything about commutativity.\n Consider falling back to other strategies, such as asking\n `other` if it commutes with `self` or computing the unitary\n matrices of both values.\n \"\"\"\n\n\ndef commutes(v1: Any,\n v2: Any,\n *,\n atol: Union[int, float] = 1e-8,\n default: TDefault = RaiseTypeErrorIfNotProvided\n ) -> Union[bool, TDefault]:\n \"\"\"Determines whether two values commute.\n\n This is determined by any one of the following techniques:\n\n - Either value has a `_commutes_` method that returns 'True', 'False', or\n 'None' (meaning indeterminate). If both methods either don't exist or\n return `NotImplemented` then another strategy is tried. 
`v1._commutes_`\n is tried before `v2._commutes_`.\n - Both values are matrices. The return value is determined by checking if\n v1 @ v2 - v2 @ v1 is sufficiently close to zero.\n - Both values are `cirq.Operation` instances. If the operations apply to\n disjoint qubit sets then they commute. Otherwise, if they have unitary\n matrices, those matrices are checked for commutativity (while accounting\n for the fact that the operations may have different qubit orders or only\n partially overlap).\n\n If none of these techniques succeeds, the commutativity is assumed to be\n indeterminate.\n\n Args:\n v1: One of the values to check for commutativity. Can be a cirq object\n such as an operation, or a numpy matrix.\n v2: The other value to check for commutativity. Can be a cirq object\n such as an operation, or a numpy matrix.\n default: A fallback value to return, instead of raising a ValueError, if\n it is indeterminate whether or not the two values commute.\n atol: Absolute error tolerance. If all entries in v1@v2 - v2@v1 have a\n magnitude less than this tolerance, v1 and v2 can be reported as\n commuting. Defaults to 1e-8.\n\n Returns:\n True: `v1` and `v2` commute (or approximately commute).\n False: `v1` and `v2` don't commute.\n default: The commutativity of `v1` and `v2` is indeterminate, or could\n not be determined, and the `default` argument was specified.\n\n Raises:\n TypeError: The commutativity of `v1` and `v2` is indeterminate, or could\n not be determined, and the `default` argument was not specified.\n \"\"\"\n atol = float(atol)\n\n strats = [\n _strat_commutes_from_commutes,\n _strat_commutes_from_matrix,\n _strat_commutes_from_operation,\n ]\n for strat in strats:\n result = strat(v1, v2, atol=atol)\n if result is None:\n break\n if result is not NotImplemented:\n return result\n if default is not RaiseTypeErrorIfNotProvided:\n return default\n raise TypeError(\n f\"Failed to determine whether or not \"\n f\"{v1!r} commutes with {v2!r}. 
\"\n f\"The result may be indeterminate, or there may be no strategy \"\n f\"implemented to handle this case.\\n\"\n f\"If you want a default result in this case, specify a `default=` \"\n f\"argument or use `cirq.definitely_commutes`.\")\n\n\ndef definitely_commutes(v1: Any, v2: Any, *,\n atol: Union[int, float] = 1e-8) -> bool:\n \"\"\"Determines whether two values definitely commute.\n\n Returns:\n True: The two values definitely commute.\n False: The two values may or may not commute.\n \"\"\"\n return commutes(v1, v2, atol=atol, default=False)\n\n\ndef _strat_commutes_from_commutes(v1: Any,\n v2: Any,\n *,\n atol: Union[int, float] = 1e-8\n ) -> Union[bool, NotImplementedType, None]:\n \"\"\"Attempts to determine commutativity via the objects' _commutes_\n method.\"\"\"\n\n for a, b in [(v1, v2), (v2, v1)]:\n getter = getattr(a, '_commutes_', None)\n if getter is None:\n continue\n val = getter(b, atol=atol)\n if val is not NotImplemented:\n return val\n return NotImplemented\n\n\ndef _strat_commutes_from_matrix(\n v1: Any,\n v2: Any,\n *,\n atol: float,\n) -> Union[bool, NotImplementedType, None]:\n \"\"\"Attempts to determine commutativity of matrices.\"\"\"\n if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):\n return NotImplemented\n if v1.shape != v2.shape:\n return None\n return linalg.matrix_commutes(v1, v2, atol=atol)\n\n\ndef _strat_commutes_from_operation(\n v1: Any,\n v2: Any,\n *,\n atol: float,\n) -> Union[bool, NotImplementedType, None]:\n if not isinstance(v1, ops.Operation) or not isinstance(v2, ops.Operation):\n return NotImplemented\n\n if set(v1.qubits).isdisjoint(v2.qubits):\n return True\n\n from cirq import circuits\n circuit12 = circuits.Circuit(v1, v2)\n circuit21 = circuits.Circuit(v2, v1)\n\n # Don't create gigantic matrices.\n if np.product(qid_shape_protocol.qid_shape(circuit12)) > 2**10:\n return NotImplemented # coverage: ignore\n\n m12 = unitary_protocol.unitary(circuit12, default=None)\n m21 = unitary_protocol.unitary(circuit21, default=None)\n if m12 is None:\n return NotImplemented\n return np.allclose(m12, m21, atol=atol)\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\nimport sympy\n\nimport cirq\nfrom cirq import value\nfrom cirq.testing import assert_has_consistent_trace_distance_bound\n\n\n\nclass CExpZinGate(cirq.EigenGate, cirq.TwoQubitGate):\n \"\"\"Two-qubit gate for the following matrix:\n [1 0 0 0]\n [0 1 0 0]\n [0 0 i 0]\n [0 0 0 -i]\n \"\"\"\n\n def __init__(self, quarter_turns: value.TParamVal) -> None:\n super().__init__(exponent=quarter_turns)\n\n @property\n def exponent(self):\n return self._exponent\n\n def _with_exponent(self, exponent):\n return CExpZinGate(exponent)\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 1, 0, 0])),\n (0.5, np.diag([0, 0, 1, 0])),\n (-0.5, np.diag([0, 0, 0, 1])),\n ]\n\n\nclass ZGateDef(cirq.EigenGate, cirq.TwoQubitGate):\n @property\n def exponent(self):\n return self._exponent\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n\ndef test_approximate_common_period():\n from cirq.ops.eigen_gate import _approximate_common_period as f\n\n assert f([]) is None\n assert f([0]) is None\n assert f([1, 0]) is None\n assert f([np.e, np.pi]) is None\n\n assert f([1]) == 1\n assert f([-1]) == 1\n assert f([2.5]) == 2.5\n assert f([1.5, 2]) == 6\n assert f([2, 3]) == 6\n assert abs(f([1 / 3, 2 / 3]) - 2 / 3) < 1e-8\n assert abs(f([2 / 5, 3 / 5]) - 6 / 5) < 1e-8\n assert f([0.5, -0.5]) == 0.5\n np.testing.assert_allclose(f([np.e]), np.e, atol=1e-8)\n\n\ndef test_init():\n assert CExpZinGate(1).exponent == 1\n assert CExpZinGate(0.5).exponent == 0.5\n assert CExpZinGate(4.5).exponent == 4.5\n assert CExpZinGate(1.5).exponent == 1.5\n assert CExpZinGate(3.5).exponent == 3.5\n assert CExpZinGate(sympy.Symbol('a')).exponent == sympy.Symbol('a')\n\n assert ZGateDef(exponent=0.5).exponent == 0.5\n\n\ndef test_eq():\n eq = cirq.testing.EqualsTester()\n eq.make_equality_group(lambda: CExpZinGate(quarter_turns=0.1))\n eq.add_equality_group(CExpZinGate(0), CExpZinGate(4), CExpZinGate(-4))\n\n # Equates by canonicalized period.\n eq.add_equality_group(CExpZinGate(1.5), CExpZinGate(41.5))\n eq.add_equality_group(CExpZinGate(3.5), CExpZinGate(-0.5))\n\n eq.add_equality_group(CExpZinGate(2.5))\n eq.add_equality_group(CExpZinGate(2.25))\n eq.make_equality_group(lambda: sympy.Symbol('a'))\n eq.add_equality_group(sympy.Symbol('b'))\n\n eq.add_equality_group(ZGateDef(exponent=0.5,\n global_shift=0.0))\n eq.add_equality_group(ZGateDef(exponent=-0.5,\n global_shift=0.0))\n eq.add_equality_group(ZGateDef(exponent=0.5,\n global_shift=0.5))\n eq.add_equality_group(ZGateDef(exponent=1.0,\n global_shift=0.5))\n\n\ndef test_approx_eq():\n assert cirq.approx_eq(CExpZinGate(1.5), CExpZinGate(1.5), atol=0.1)\n assert cirq.approx_eq(CExpZinGate(1.5), CExpZinGate(1.7), atol=0.3)\n assert not cirq.approx_eq(CExpZinGate(1.5), CExpZinGate(1.7), atol=0.1)\n\n assert cirq.approx_eq(\n ZGateDef(exponent=1.5),\n ZGateDef(exponent=1.5),\n atol=0.1\n )\n assert not 
cirq.approx_eq(\n CExpZinGate(1.5),\n ZGateDef(exponent=1.5),\n atol=0.1\n )\n assert not cirq.approx_eq(\n ZGateDef(exponent=1.5),\n ZGateDef(exponent=sympy.Symbol('a')),\n atol=0.1\n )\n\n assert cirq.approx_eq(\n CExpZinGate(sympy.Symbol('a')),\n CExpZinGate(sympy.Symbol('a')),\n atol=0.1\n )\n assert not cirq.approx_eq(\n CExpZinGate(sympy.Symbol('a')),\n CExpZinGate(sympy.Symbol('b')),\n atol=0.1\n )\n\n\ndef test_approx_eq_periodic():\n assert cirq.approx_eq(CExpZinGate(1.5), CExpZinGate(5.5), atol=1e-9)\n assert cirq.approx_eq(CExpZinGate(1.5), CExpZinGate(9.5), atol=1e-9)\n assert cirq.approx_eq(CExpZinGate(-2.5), CExpZinGate(1.5), atol=1e-9)\n assert not cirq.approx_eq(CExpZinGate(0), CExpZinGate(1.5), atol=1e-9)\n\n # The tests below do not work with usual canonical exponent comparison.\n assert cirq.approx_eq(CExpZinGate(0 - 1e-10), CExpZinGate(0), atol=1e-9)\n assert cirq.approx_eq(CExpZinGate(0), CExpZinGate(4 - 1e-10), atol=1e-9)\n\n\ndef test_period():\n class Components(cirq.EigenGate, cirq.TwoQubitGate):\n def __init__(self, a, b, c, d):\n super().__init__()\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n\n def _eigen_components(self):\n return [\n (self.a, np.diag([1, 0, 0, 0])),\n (self.b, np.diag([0, 1, 0, 0])),\n (self.c, np.diag([0, 0, 1, 0])),\n (self.d, np.diag([0, 0, 0, 1])),\n ]\n\n assert Components(0, 0, 0, 0)._period() is None\n assert Components(1, 0, 0, 0)._period() == 2\n assert Components(0.5, 0, 0, 0)._period() == 4\n assert Components(1 / 3, 0, 0, 0)._period() == 6\n assert Components(1 / 3, 1 / 2, 0, 0)._period() == 12\n assert Components(1 / 3, 1 / 2, 1 / 5, 0)._period() == 60\n assert Components(1 / 6, 1 / 2, 1 / 5, 0)._period() == 60\n assert Components(np.e, np.pi, 0, 0)._period() is None\n np.testing.assert_allclose(\n Components(np.e, np.e, 0, 0)._period(),\n 2/np.e)\n assert Components(-0.5, 0, 0, 0)._period() == 4\n assert Components(-0.5, 0.5, 0, 0)._period() == 4\n assert Components(-0.5, 0.5, 0.5, 0.5)._period() == 4\n assert Components(1, 1, -1, 1)._period() == 2\n\n\ndef test_pow():\n assert CExpZinGate(0.25)**2 == CExpZinGate(0.5)\n assert CExpZinGate(0.25)**-1 == CExpZinGate(-0.25)\n assert CExpZinGate(0.25)**0 == CExpZinGate(0)\n assert CExpZinGate(sympy.Symbol('a'))**1.5 == CExpZinGate(\n sympy.Symbol('a')*1.5)\n assert ZGateDef(exponent=0.25)**2 == ZGateDef(exponent=0.5)\n assert ZGateDef(exponent=0.25,\n global_shift=0.5)**2 == ZGateDef(\n exponent=0.5,\n global_shift=0.5)\n\n\ndef test_inverse():\n assert cirq.inverse(CExpZinGate(0.25)) == CExpZinGate(-0.25)\n assert cirq.inverse(CExpZinGate(sympy.Symbol('a'))) == CExpZinGate(\n -sympy.Symbol('a'))\n\n\ndef test_trace_distance_bound():\n assert cirq.trace_distance_bound(CExpZinGate(0.001)) < 0.01\n assert cirq.trace_distance_bound(CExpZinGate(sympy.Symbol('a'))) == 1\n assert cirq.approx_eq(cirq.trace_distance_bound(CExpZinGate(2)), 1)\n\n class E(cirq.EigenGate):\n\n def _num_qubits_(self):\n # coverage: ignore\n return 1\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0], [0, 0]])),\n (12, np.array([[0, 0], [0, 1]])),\n ]\n\n for numerator in range(13):\n assert_has_consistent_trace_distance_bound(E()**(numerator / 12))\n\n\ndef test_extrapolate():\n h = CExpZinGate(2)\n assert cirq.pow(h, 1.5) is not None\n assert cirq.inverse(h, None) is not None\n\n p = CExpZinGate(0.1)\n assert cirq.pow(p, 1.5) is not None\n assert cirq.inverse(p) is not None\n\n s = CExpZinGate(sympy.Symbol('a'))\n assert cirq.pow(s, 1.5) == CExpZinGate(sympy.Symbol('a') * 1.5)\n assert 
cirq.inverse(s) == CExpZinGate(-sympy.Symbol('a'))\n\n\ndef test_matrix():\n\n for n in [1, 2, 3, 4, 0.0001, 3.9999]:\n assert cirq.has_unitary(CExpZinGate(n))\n\n np.testing.assert_allclose(\n cirq.unitary(CExpZinGate(1)),\n np.diag([1, 1, 1j, -1j]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(CExpZinGate(2)),\n np.diag([1, 1, -1, -1]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(CExpZinGate(3)),\n np.diag([1, 1, -1j, 1j]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(CExpZinGate(4)),\n np.diag([1, 1, 1, 1]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(CExpZinGate(0.00001)),\n cirq.unitary(CExpZinGate(3.99999)),\n atol=1e-4)\n\n assert not np.allclose(\n cirq.unitary(CExpZinGate(0.00001)),\n cirq.unitary(CExpZinGate(1.99999)),\n atol=1e-4)\n\n assert not cirq.has_unitary(CExpZinGate(sympy.Symbol('a')))\n assert cirq.unitary(CExpZinGate(sympy.Symbol('a')), None) is None\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=0)),\n np.eye(2),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=1)),\n np.diag([1, -1]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=0.5)),\n np.diag([1, 1j]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=1, global_shift=0.5)),\n np.diag([1j, -1j]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=0.5, global_shift=0.5)),\n np.diag([1+1j, -1+1j])/np.sqrt(2),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(ZGateDef(exponent=0.5, global_shift=-0.5)),\n np.diag([1-1j, 1+1j])/np.sqrt(2),\n atol=1e-8)\n\n\ndef test_matrix_is_exact_for_quarter_turn():\n np.testing.assert_equal(\n cirq.unitary(CExpZinGate(1)),\n np.diag([1, 1, 1j, -1j]))\n\n\ndef test_is_parameterized():\n assert not cirq.is_parameterized(CExpZinGate(0))\n assert not cirq.is_parameterized(CExpZinGate(1))\n assert not cirq.is_parameterized(CExpZinGate(3))\n assert cirq.is_parameterized(CExpZinGate(sympy.Symbol('a')))\n\n\ndef test_resolve_parameters():\n assert cirq.resolve_parameters(CExpZinGate(sympy.Symbol('a')),\n cirq.ParamResolver({'a': 0.5})) == CExpZinGate(0.5)\n\n assert cirq.resolve_parameters(CExpZinGate(0.25),\n cirq.ParamResolver({})) == CExpZinGate(0.25)\n\n\ndef test_diagram_period():\n\n class ShiftyGate(cirq.EigenGate, cirq.SingleQubitGate):\n def _eigen_components(self):\n raise NotImplementedError()\n\n def __init__(self, e, *shifts):\n super().__init__(exponent=e, global_shift=np.random.random())\n self.shifts = shifts\n\n def _eigen_shifts(self):\n return list(self.shifts)\n\n args = cirq.CircuitDiagramInfoArgs.UNINFORMED_DEFAULT\n\n assert ShiftyGate(0.5, 0, 1)._diagram_exponent(args) == 0.5\n assert ShiftyGate(1.5, 0, 1)._diagram_exponent(args) == -0.5\n assert ShiftyGate(2.5, 0, 1)._diagram_exponent(args) == 0.5\n\n assert ShiftyGate(0.5, 0.5, -0.5)._diagram_exponent(args) == 0.5\n assert ShiftyGate(1.5, 0.5, -0.5)._diagram_exponent(args) == -0.5\n assert ShiftyGate(2.5, 0.5, -0.5)._diagram_exponent(args) == 0.5\n\n # Irrational period.\n np.testing.assert_allclose(\n ShiftyGate(np.e, 0, 1/np.e)._diagram_exponent(args),\n np.e,\n atol=1e-2) # diagram precision is 1e-3 and can perturb result.\n np.testing.assert_allclose(\n ShiftyGate(np.e*2.5, 0, 1/np.e)._diagram_exponent(args),\n np.e/2,\n atol=1e-2) # diagram precision is 1e-3 and can perturb result.\n\n # Unknown period.\n assert ShiftyGate(505.2, 0, np.pi, np.e)._diagram_exponent(args) == 505.2\n\n\nclass 
WeightedZPowGate(cirq.EigenGate, cirq.SingleQubitGate):\n\n def __init__(self, weight, **kwargs):\n self.weight = weight\n super().__init__(**kwargs)\n\n def _value_equality_values_(self):\n return self.weight, self._canonical_exponent, self._global_shift\n\n _value_equality_approximate_values_ = _value_equality_values_\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (self.weight, np.diag([0, 1])),\n ]\n\n def _with_exponent(self, exponent):\n return type(self)(self.weight,\n exponent=exponent,\n global_shift=self._global_shift)\n\n\[email protected]('gate1,gate2,eq_up_to_global_phase', [\n (cirq.rz(0.3 * np.pi), cirq.Z**0.3, True),\n (cirq.Z, cirq.Gate, False),\n (cirq.rz(0.3), cirq.Z**0.3, False),\n (cirq.ZZPowGate(global_shift=0.5), cirq.ZZ, True),\n (cirq.ZPowGate(global_shift=0.5)**sympy.Symbol('e'), cirq.Z, False),\n (cirq.Z**sympy.Symbol('e'), cirq.Z**sympy.Symbol('f'), False),\n (cirq.ZZ**1.9, cirq.ZZ**-0.1, True),\n (WeightedZPowGate(0), WeightedZPowGate(0.1), False),\n (WeightedZPowGate(0.3), WeightedZPowGate(0.3, global_shift=0.1), True),\n (cirq.X, cirq.Z, False),\n (cirq.X**0.3, cirq.Z**0.3, False),\n])\ndef test_equal_up_to_global_phase(gate1, gate2, eq_up_to_global_phase):\n assert cirq.equal_up_to_global_phase(gate1, gate2) == eq_up_to_global_phase\n"
] | [
[
"numpy.linalg.eigvals",
"numpy.prod"
],
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.diag",
"numpy.random.random",
"numpy.sqrt",
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
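The cirq test file in the row above exercises two properties worth calling out: the gate's unitary is diagonal, and its eigenvalue shifts (0 and 0.5 half-turns) make the exponent periodic with period 4. A minimal numpy sketch of that arithmetic, independent of cirq (the matrix is copied from test_matrix above):

import numpy as np

# Quarter-turn unitary of the controlled gate under test.
u = np.diag([1, 1, 1j, -1j])

# Unitarity: U @ U^dagger == I.
assert np.allclose(u @ u.conj().T, np.eye(4))

# Period 4 in the exponent: the fourth power is the identity, which is why
# test_matrix expects CExpZinGate(0.00001) ~= CExpZinGate(3.99999).
assert np.allclose(np.linalg.matrix_power(u, 4), np.eye(4))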
anonymouslorem/library_identification_vulnerability_report | [
"3eb1916b25bcf885640ed19954377edf45f7498a",
"3eb1916b25bcf885640ed19954377edf45f7498a"
] | [
"FastXML/fastxml/fastxml/fastxml.py",
"LightXML/src/model.py"
] | [
"from builtins import range\r\nfrom builtins import object\r\nimport os\r\nimport json\r\nfrom collections import OrderedDict\r\n\r\nimport scipy.sparse as sp\r\n\r\nfrom .inferencer import IForest, LeafComputer, Blender, IForestBlender\r\n\r\nclass Inferencer(object):\r\n \"\"\"\r\n Loads up a model for inferencing\r\n \"\"\"\r\n def __init__(self, dname, gamma=30, blend=0.8, leaf_probs=False):\r\n with open(os.path.join(dname, 'settings'), 'rt') as f:\r\n self.__dict__.update(json.load(f))\r\n\r\n self.gamma = gamma\r\n self.blend = blend\r\n self.leaf_probs = leaf_probs\r\n\r\n forest = IForest(dname, self.n_trees, self.n_labels)\r\n if self.leaf_classifiers:\r\n lc = LeafComputer(dname)\r\n predictor = Blender(forest, lc)\r\n else:\r\n predictor = IForestBlender(forest)\r\n\r\n self.predictor = predictor\r\n\r\n def predict(self, X, fmt='sparse'):\r\n assert fmt in ('sparse', 'dict')\r\n s = []\r\n num = X.shape[0] if isinstance(X, sp.csr_matrix) else len(X)\r\n for i in range(num):\r\n Xi = X[i]\r\n mean = self.predictor.predict(Xi.data, Xi.indices, \r\n self.blend, self.gamma, self.leaf_probs)\r\n\r\n if fmt == 'sparse':\r\n s.append(mean)\r\n\r\n else:\r\n od = OrderedDict()\r\n for idx in reversed(mean.data.argsort()):\r\n od[mean.indices[idx]] = mean.data[idx]\r\n \r\n s.append(od)\r\n\r\n if fmt == 'sparse':\r\n return sp.vstack(s)\r\n\r\n return s\r\n\r\n",
"import tqdm\r\nimport time\r\nimport cProfile\r\nimport numpy as np\r\nfrom apex import amp\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom transformers import BertTokenizer, BertConfig, BertModel\r\nfrom transformers import RobertaModel, RobertaConfig, RobertaTokenizer\r\nfrom transformers import XLNetTokenizer, XLNetModel, XLNetConfig\r\n\r\nfrom tokenizers import BertWordPieceTokenizer\r\nfrom transformers import RobertaTokenizerFast\r\n\r\ndef get_bert(bert_name):\r\n if 'roberta' in bert_name:\r\n print('load roberta-base')\r\n model_config = RobertaConfig.from_pretrained('roberta-base')\r\n model_config.output_hidden_states = True\r\n bert = RobertaModel.from_pretrained('roberta-base', config=model_config)\r\n elif 'xlnet' in bert_name:\r\n print('load xlnet-base-cased')\r\n model_config = XLNetConfig.from_pretrained('xlnet-base-cased')\r\n model_config.output_hidden_states = True\r\n bert = XLNetModel.from_pretrained('xlnet-base-cased', config=model_config)\r\n else:\r\n print('load bert-base-uncased')\r\n model_config = BertConfig.from_pretrained('bert-base-uncased')\r\n model_config.output_hidden_states = True\r\n bert = BertModel.from_pretrained('bert-base-uncased', config=model_config)\r\n return bert\r\n\r\nclass LightXML(nn.Module):\r\n def __init__(self, n_labels, group_y=None, bert='bert-base', feature_layers=5, dropout=0.5, update_count=1,\r\n candidates_topk=10, \r\n use_swa=True, swa_warmup_epoch=10, swa_update_step=200, hidden_dim=300):\r\n super(LightXML, self).__init__()\r\n\r\n self.use_swa = use_swa\r\n self.swa_warmup_epoch = swa_warmup_epoch\r\n self.swa_update_step = swa_update_step\r\n self.swa_state = {}\r\n\r\n self.update_count = update_count\r\n\r\n self.candidates_topk = candidates_topk\r\n\r\n print('swa', self.use_swa, self.swa_warmup_epoch, self.swa_update_step, self.swa_state)\r\n print('update_count', self.update_count)\r\n\r\n self.bert_name, self.bert = bert, get_bert(bert)\r\n self.feature_layers, self.drop_out = feature_layers, nn.Dropout(dropout)\r\n\r\n self.group_y = group_y\r\n if self.group_y is not None:\r\n self.group_y_labels = group_y.shape[0]\r\n print('hidden dim:', hidden_dim)\r\n print('label goup numbers:', self.group_y_labels)\r\n\r\n self.l0 = nn.Linear(self.feature_layers*self.bert.config.hidden_size, self.group_y_labels)\r\n # hidden bottle layer\r\n self.l1 = nn.Linear(self.feature_layers*self.bert.config.hidden_size, hidden_dim)\r\n self.embed = nn.Embedding(n_labels, hidden_dim)\r\n nn.init.xavier_uniform_(self.embed.weight)\r\n else:\r\n self.l0 = nn.Linear(self.feature_layers*self.bert.config.hidden_size, n_labels)\r\n\r\n def get_candidates(self, group_logits, group_gd=None):\r\n logits = torch.sigmoid(group_logits.detach())\r\n if group_gd is not None:\r\n logits += group_gd\r\n scores, indices = torch.topk(logits, k=self.candidates_topk)\r\n scores, indices = scores.cpu().detach().numpy(), indices.cpu().detach().numpy()\r\n candidates, candidates_scores = [], []\r\n for index, score in zip(indices, scores):\r\n candidates.append(self.group_y[index])\r\n candidates_scores.append([np.full(c.shape, s) for c, s in zip(candidates[-1], score)])\r\n candidates[-1] = np.concatenate(candidates[-1])\r\n candidates_scores[-1] = np.concatenate(candidates_scores[-1])\r\n max_candidates = max([i.shape[0] for i in candidates])\r\n candidates = np.stack([np.pad(i, (0, max_candidates - i.shape[0]), mode='edge') for i in candidates])\r\n candidates_scores = np.stack([np.pad(i, (0, max_candidates - i.shape[0]), mode='edge') for i in 
candidates_scores])\r\n return indices, candidates, candidates_scores\r\n\r\n def forward(self, input_ids, attention_mask, token_type_ids,\r\n labels=None, group_labels=None, candidates=None):\r\n is_training = labels is not None\r\n\r\n outs = self.bert(\r\n input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids\r\n )[-1]\r\n\r\n out = torch.cat([outs[-i][:, 0] for i in range(1, self.feature_layers+1)], dim=-1)\r\n out = self.drop_out(out)\r\n group_logits = self.l0(out)\r\n if self.group_y is None:\r\n logits = group_logits\r\n if is_training:\r\n loss_fn = torch.nn.BCEWithLogitsLoss()\r\n loss = loss_fn(logits, labels)\r\n return logits, loss\r\n else:\r\n return logits\r\n\r\n if is_training:\r\n l = labels.to(dtype=torch.bool)\r\n target_candidates = torch.masked_select(candidates, l).detach().cpu()\r\n target_candidates_num = l.sum(dim=1).detach().cpu()\r\n groups, candidates, group_candidates_scores = self.get_candidates(group_logits,\r\n group_gd=group_labels if is_training else None)\r\n if is_training:\r\n bs = 0\r\n new_labels = []\r\n for i, n in enumerate(target_candidates_num.numpy()):\r\n be = bs + n\r\n c = set(target_candidates[bs: be].numpy())\r\n c2 = candidates[i]\r\n new_labels.append(torch.tensor([1.0 if i in c else 0.0 for i in c2 ]))\r\n if len(c) != new_labels[-1].sum():\r\n s_c2 = set(c2)\r\n for cc in list(c):\r\n if cc in s_c2:\r\n continue\r\n for j in range(new_labels[-1].shape[0]):\r\n if new_labels[-1][j].item() != 1:\r\n c2[j] = cc\r\n new_labels[-1][j] = 1.0\r\n break\r\n bs = be\r\n labels = torch.stack(new_labels).cuda()\r\n candidates, group_candidates_scores = torch.LongTensor(candidates).cuda(), torch.Tensor(group_candidates_scores).cuda()\r\n\r\n emb = self.l1(out)\r\n embed_weights = self.embed(candidates) # N, sampled_size, H\r\n emb = emb.unsqueeze(-1)\r\n logits = torch.bmm(embed_weights, emb).squeeze(-1)\r\n\r\n if is_training:\r\n loss_fn = torch.nn.BCEWithLogitsLoss()\r\n loss = loss_fn(logits, labels) + loss_fn(group_logits, group_labels)\r\n return logits, loss\r\n else:\r\n candidates_scores = torch.sigmoid(logits)\r\n candidates_scores = candidates_scores * group_candidates_scores\r\n return group_logits, candidates, candidates_scores\r\n\r\n def save_model(self, path):\r\n self.swa_swap_params()\r\n torch.save(self.state_dict(), path)\r\n self.swa_swap_params()\r\n\r\n def swa_init(self):\r\n self.swa_state = {'models_num': 1}\r\n for n, p in self.named_parameters():\r\n self.swa_state[n] = p.data.cpu().clone().detach()\r\n\r\n def swa_step(self):\r\n if 'models_num' not in self.swa_state:\r\n return\r\n self.swa_state['models_num'] += 1\r\n beta = 1.0 / self.swa_state['models_num']\r\n with torch.no_grad():\r\n for n, p in self.named_parameters():\r\n self.swa_state[n].mul_(1.0 - beta).add_(beta, p.data.cpu())\r\n\r\n def swa_swap_params(self):\r\n if 'models_num' not in self.swa_state:\r\n return\r\n for n, p in self.named_parameters():\r\n self.swa_state[n], p.data = self.swa_state[n].cpu(), p.data.cpu()\r\n self.swa_state[n], p.data = p.data.cpu(), self.swa_state[n].cuda()\r\n\r\n def get_fast_tokenizer(self):\r\n if 'roberta' in self.bert_name:\r\n tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base', do_lower_case=True)\r\n elif 'xlnet' in self.bert_name:\r\n tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') \r\n else:\r\n tokenizer = BertWordPieceTokenizer(\r\n \"data/.bert-base-uncased-vocab.txt\",\r\n lowercase=True)\r\n return tokenizer\r\n\r\n def get_tokenizer(self):\r\n if 
'roberta' in self.bert_name:\r\n print('load roberta-base tokenizer')\r\n tokenizer = RobertaTokenizer.from_pretrained('roberta-base', do_lower_case=True)\r\n elif 'xlnet' in self.bert_name:\r\n print('load xlnet-base-cased tokenizer')\r\n tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')\r\n else:\r\n print('load bert-base-uncased tokenizer')\r\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\r\n return tokenizer\r\n\r\n def get_accuracy(self, candidates, logits, labels):\r\n if candidates is not None:\r\n candidates = candidates.detach().cpu()\r\n scores, indices = torch.topk(logits.detach().cpu(), k=10)\r\n\r\n acc1, acc3, acc5, total = 0, 0, 0, 0\r\n for i, l in enumerate(labels):\r\n l = set(np.nonzero(l)[0])\r\n\r\n if candidates is not None:\r\n labels = candidates[i][indices[i]].numpy()\r\n else:\r\n labels = indices[i, :5].numpy()\r\n\r\n acc1 += len(set([labels[0]]) & l)\r\n acc3 += len(set(labels[:3]) & l)\r\n acc5 += len(set(labels[:5]) & l)\r\n total += 1\r\n\r\n return total, acc1, acc3, acc5\r\n\r\n def one_epoch(self, epoch, dataloader, optimizer,\r\n mode='train', eval_loader=None, eval_step=20000, log=None):\r\n\r\n bar = tqdm.tqdm(total=len(dataloader))\r\n p1, p3, p5 = 0, 0, 0\r\n g_p1, g_p3, g_p5 = 0, 0, 0\r\n total, acc1, acc3, acc5 = 0, 0, 0, 0\r\n g_acc1, g_acc3, g_acc5 = 0, 0, 0\r\n train_loss = 0\r\n\r\n if mode == 'train':\r\n self.train()\r\n else:\r\n self.eval()\r\n\r\n if self.use_swa and epoch == self.swa_warmup_epoch and mode == 'train':\r\n self.swa_init()\r\n\r\n if self.use_swa and mode == 'eval':\r\n self.swa_swap_params()\r\n\r\n pred_scores, pred_labels = [], []\r\n bar.set_description(f'{mode}-{epoch}')\r\n\r\n with torch.set_grad_enabled(mode == 'train'):\r\n for step, data in enumerate(dataloader):\r\n batch = tuple(t for t in data)\r\n have_group = len(batch) > 4\r\n inputs = {'input_ids': batch[0].cuda(),\r\n 'attention_mask': batch[1].cuda(),\r\n 'token_type_ids': batch[2].cuda()}\r\n if mode == 'train':\r\n inputs['labels'] = batch[3].cuda()\r\n if self.group_y is not None:\r\n inputs['group_labels'] = batch[4].cuda()\r\n inputs['candidates'] = batch[5].cuda()\r\n\r\n outputs = self(**inputs)\r\n\r\n bar.update(1)\r\n\r\n if mode == 'train':\r\n loss = outputs[1]\r\n loss /= self.update_count\r\n train_loss += loss.item()\r\n\r\n with amp.scale_loss(loss, optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n \r\n if step % self.update_count == 0:\r\n optimizer.step()\r\n self.zero_grad()\r\n\r\n if step % eval_step == 0 and eval_loader is not None and step != 0:\r\n results = self.one_epoch(epoch, eval_loader, optimizer, mode='eval')\r\n p1, p3, p5 = results[3:6]\r\n g_p1, g_p3, g_p5 = results[:3]\r\n if self.group_y is not None:\r\n log.log(f'{epoch:>2} {step:>6}: {p1:.4f}, {p3:.4f}, {p5:.4f}'\r\n f' {g_p1:.4f}, {g_p3:.4f}, {g_p5:.4f}')\r\n else:\r\n log.log(f'{epoch:>2} {step:>6}: {p1:.4f}, {p3:.4f}, {p5:.4f}')\r\n # NOTE: we don't reset model to train mode and keep model in eval mode\r\n # which means all dropout will be remove after `eval_step` in every epoch\r\n # this tricks makes LightXML converge fast\r\n # self.train()\r\n\r\n if self.use_swa and step % self.swa_update_step == 0:\r\n self.swa_step()\r\n\r\n bar.set_postfix(loss=loss.item())\r\n elif self.group_y is None:\r\n logits = outputs\r\n if mode == 'eval':\r\n labels = batch[3]\r\n _total, _acc1, _acc3, _acc5 = self.get_accuracy(None, logits, labels.cpu().numpy())\r\n total += _total; acc1 += _acc1; acc3 += _acc3; acc5 += 
_acc5\r\n p1 = acc1 / total\r\n p3 = acc3 / total / 3\r\n p5 = acc5 / total / 5\r\n bar.set_postfix(p1=p1, p3=p3, p5=p5)\r\n elif mode == 'test':\r\n pred_scores.append(logits.detach().cpu())\r\n else:\r\n group_logits, candidates, logits = outputs\r\n\r\n if mode == 'eval':\r\n labels = batch[3]\r\n group_labels = batch[4]\r\n\r\n _total, _acc1, _acc3, _acc5 = self.get_accuracy(candidates, logits, labels.cpu().numpy())\r\n total += _total; acc1 += _acc1; acc3 += _acc3; acc5 += _acc5\r\n p1 = acc1 / total\r\n p3 = acc3 / total / 3\r\n p5 = acc5 / total / 5\r\n \r\n _, _g_acc1, _g_acc3, _g_acc5 = self.get_accuracy(None, group_logits, group_labels.cpu().numpy())\r\n g_acc1 += _g_acc1; g_acc3 += _g_acc3; g_acc5 += _g_acc5\r\n g_p1 = g_acc1 / total\r\n g_p3 = g_acc3 / total / 3\r\n g_p5 = g_acc5 / total / 5\r\n bar.set_postfix(p1=p1, p3=p3, p5=p5, g_p1=g_p1, g_p3=g_p3, g_p5=g_p5)\r\n elif mode == 'test':\r\n _scores, _indices = torch.topk(logits.detach().cpu(), k=100)\r\n _labels = torch.stack([candidates[i][_indices[i]] for i in range(_indices.shape[0])], dim=0)\r\n pred_scores.append(_scores.cpu())\r\n pred_labels.append(_labels.cpu())\r\n\r\n\r\n if self.use_swa and mode == 'eval':\r\n self.swa_swap_params()\r\n bar.close()\r\n\r\n if mode == 'eval':\r\n return g_p1, g_p3, g_p5, p1, p3, p5\r\n elif mode == 'test':\r\n return torch.cat(pred_scores, dim=0).numpy(), torch.cat(pred_labels, dim=0).numpy() if len(pred_labels) != 0 else None\r\n elif mode == 'train':\r\n return train_loss\r\n"
] | [
[
"scipy.sparse.vstack"
],
[
"torch.cat",
"torch.nn.Embedding",
"numpy.concatenate",
"torch.nn.BCEWithLogitsLoss",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.topk",
"torch.nn.Dropout",
"numpy.pad",
"numpy.full",
"torch.tensor",
"torch.bmm",
"torch.masked_select",
"torch.sigmoid",
"torch.LongTensor",
"numpy.nonzero",
"torch.nn.Linear",
"torch.stack",
"torch.Tensor",
"torch.nn.init.xavier_uniform_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
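LightXML's swa_init/swa_step pair in the row above keeps a running mean of parameter snapshots: after the n-th update each stored tensor equals the average of all n snapshots, because it is updated as avg <- (1 - 1/n) * avg + (1/n) * p. A self-contained sketch of that update rule (the deprecated add_(beta, p) call in the row is spelled with the modern alpha keyword here):

import torch

snapshots = [torch.tensor([1., 1., 1.]), torch.tensor([3., 3., 3.])]

models_num = 1                 # swa_init stores the current model first
avg = snapshots[0].clone()
for p in snapshots[1:]:        # swa_step, once per later snapshot
    models_num += 1
    beta = 1.0 / models_num
    avg.mul_(1.0 - beta).add_(p, alpha=beta)

assert torch.allclose(avg, torch.tensor([2., 2., 2.]))  # mean of the two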
mahesh131998/voice-based-visual-acuity-test | [
"67bf5d2141ee6725c4c37fa3ae67d3cac9cf01bf"
] | [
"eye1.py"
] | [
"# from flask import Flask, render_template, Response, request, redirect, url_for\r\n# import tkinter as tk\r\n# import time\r\n# import random\r\n# import speech_recognition as sr\r\nimport pyttsx3 as engine\r\n# import threading\r\n# from bs4 import BeautifulSoup \r\n# import requests \r\n\r\nfrom flask import Flask, render_template, Response, request, redirect, url_for,flash, session\r\nimport tkinter as tk\r\nimport time\r\nimport random\r\nimport speech_recognition as sr\r\n# import pyttsx3\r\nimport threading\r\nfrom werkzeug.utils import secure_filename\r\nimport os\r\nfrom flask_session import Session\r\nimport numpy as np\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\r\nfrom keras.models import Sequential, load_model\r\nimport time\r\nimport sys\r\nimport requests \r\nfrom bs4 import BeautifulSoup \r\nSESSION_TYPE = 'filesystem'\r\nsess = Session()\r\n\r\nglobal i\r\nglobal p\r\napp = Flask(__name__,static_url_path='/static')\r\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\r\n\r\n\r\n\r\[email protected](\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\[email protected](\"/eye/\", methods=['POST'])\r\ndef index1():\r\n def fun():\r\n window = tk.Tk()\r\n window.configure(background='white')\r\n window.state(\"zoomed\")\r\n canvas = tk.Canvas(window, bg=\"white\", width=980, height=580, highlightthickness=0)\r\n canvas.pack(fill=tk.BOTH, expand=True)\r\n canvas_scroll = tk.Scrollbar(canvas, command=canvas.yview)\r\n canvas_scroll.place(relx=1, rely=0, relheight=1, anchor=tk.NE)\r\n canvas.configure(yscrollcommand=canvas_scroll.set, scrollregion=())\r\n i=0\r\n wrong=0\r\n engine.speak(\"Before begning the test kindly keep 6 meter distance from the screen, we will now test the right eye, cover the left eye\")\r\n j=[152,130,108,87,65,43,33,21,15,9]\r\n for x in j:\r\n i=i+1\r\n # speech to text algorithm\r\n def speech():\r\n nonlocal wrong\r\n def top():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n r.adjust_for_ambient_noise(source)\r\n print(\"speak now\") \r\n engine.speak(\"you may speak now\")\r\n print('hello world')\r\n # engine.runAndWait()\r\n audio_data = r.record(source, duration=3)\r\n print(\"Recognizing...\")\r\n try:\r\n # convert speech to text\r\n text = r.recognize_google(audio_data, language='en-GB')\r\n print(type(text))\r\n if(((text>='a' and text<= 'z') or (text>='A' and text<='Z')) and (len(text)==1)):\r\n print(len(text))\r\n print(text)\r\n return text \r\n \r\n else:\r\n engine.speak(\"sorry we could not recognise you said, say it clearly again\")\r\n # engine.runAndWait()\r\n return top() \r\n \r\n except:\r\n engine.speak(\"sorry could not recognise ur voice, you will have to say that again\")\r\n # engine.runAndWait()\r\n print(\"sorry could not recognise ur voice\")\r\n return top()\r\n \r\n #Scomparison code\r\n for g in op:\r\n print(op)\r\n d= top()\r\n if d.isupper()== False:\r\n d= d.upper()\r\n print(d)\r\n if g != d:\r\n wrong = wrong+1\r\n \r\n\r\n if wrong !=0:\r\n print(\"wrong=\", wrong)\r\n canvas.destroy()\r\n window.destroy()\r\n break\r\n elif i==10 and wrong==0:\r\n canvas.destroy()\r\n window.destroy()\r\n break\r\n \r\n #this will call the screen display\r\n list = ['A','D','F','L','M','N','W','X']\r\n sampling = random.sample(list, k=5)\r\n op = sampling\r\n # here the random letters are generated\r\n\r\n def applytoLabel():\r\n n = len(op)\r\n element = ''\r\n for i in range(n):\r\n 
element = element + op[i] +\" \"\r\n return element\r\n m=x\r\n l9 = tk.Label(canvas, text=applytoLabel(),font= (\"Optician Sans\", m ,'bold'), bg=\"white\").grid(column=1, row=1, sticky='nsew',padx=85, pady=250)\r\n canvas.create_window(33,33, window=l9, anchor=tk.NW)\r\n window.after(1,window.update(),speech())\r\n \r\n window.mainloop()\r\n print(\"number of iterations\",i-1) \r\n va =[1.00,0.90,0.80,0.70,0.60,0.50,0.40,0.30,0.20,0.10]\r\n LogMAR = va[i-2] + 0.02 * (wrong)\r\n print(LogMAR,\"LogMAR Units\")\r\n righteye = LogMAR\r\n wrong =0\r\n return righteye\r\n\r\n \r\n\r\n def fun1(): \r\n window1 = tk.Tk()\r\n window1.configure(background='white')\r\n window1.state(\"zoomed\") \r\n canvas1 = tk.Canvas(window1, bg=\"white\", width=980, height=580, highlightthickness=0)\r\n canvas1.pack(fill=tk.BOTH, expand=True)\r\n canvas1_scroll = tk.Scrollbar(canvas1, command=canvas1.yview)\r\n canvas1_scroll.place(relx=1, rely=0, relheight=1, anchor=tk.NE)\r\n canvas1.configure(yscrollcommand=canvas1_scroll.set, scrollregion=())\r\n p=0\r\n wrong1=0\r\n engine.speak(\"Before begning the test kindly keep 6 meter distance from the screen, we will now test your left eye, cover the right eye\")\r\n j=[152,130,108,87,65,43,33,21,15,9]\r\n for x in j:\r\n p=p+1\r\n # speech to text algorithm\r\n def speech():\r\n nonlocal wrong1\r\n def top():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n r.adjust_for_ambient_noise(source) \r\n print(\"speak now\") \r\n engine.speak(\"you may speak now\")\r\n # engine.runAndWait()\r\n audio_data = r.record(source, duration=3)\r\n print(\"Recognizing...\")\r\n try:\r\n # convert speech to text\r\n text = r.recognize_google(audio_data, language='en-GB')\r\n print(type(text))\r\n if(((text>='a' and text<= 'z') or (text>='A' and text<='Z')) and (len(text)==1)):\r\n print(len(text))\r\n # print(text, \"is an Alphabet)\r\n # print(type(text))\r\n print(text)\r\n return text \r\n \r\n else:\r\n engine.speak(\"sorry we could not recognise you said, say it clearly again\")\r\n # engine.runAndWait()\r\n # print(text)\r\n return top() \r\n \r\n except:\r\n engine.speak(\"sorry could not recognise ur voice, you will have to say that again\")\r\n # engine.runAndWait()\r\n print(\"sorry could not recognise ur voice\")\r\n return top()\r\n\r\n #Scomparison code\r\n for g in op:\r\n print(op)\r\n d= top()\r\n if d.isupper()== False:\r\n d= d.upper()\r\n print(d)\r\n if g != d:\r\n wrong1 = wrong1+1\r\n \r\n\r\n if wrong1 !=0:\r\n print(\"wrong=\", wrong1)\r\n canvas1.destroy()\r\n window1.destroy()\r\n break\r\n elif p==10 and wrong1==0:\r\n canvas1.destroy()\r\n window1.destroy()\r\n break\r\n \r\n #this will call the screen display\r\n list = ['A','D','F','L','M','N','W','X']\r\n sampling = random.sample(list, k=5)\r\n op = sampling\r\n # here the random letters are generated\r\n\r\n def applytoLabel():\r\n n = len(op)\r\n element = ''\r\n for i in range(n):\r\n element = element + op[i] +\" \"\r\n return element\r\n m=x\r\n l9 = tk.Label(canvas1, text=applytoLabel(),font= (\"Optician Sans\", m ,'bold'), bg=\"white\").grid(column=1, row=1, sticky='nsew',padx=85, pady=250)\r\n canvas1.create_window(33,33, window1=l9, anchor=tk.NW)\r\n window1.after(1,window1.update(),speech())\r\n \r\n window1.mainloop()\r\n print(\"number of iterations\",p-1) \r\n va =[1.00,0.90,0.80,0.70,0.60,0.50,0.40,0.30,0.20,0.10]\r\n LogMAR1 = va[p-2] + 0.02 * (wrong1)\r\n print(LogMAR1,\"LogMAR Units\")\r\n lefteye= LogMAR1\r\n 
wrong1=0\r\n return lefteye\r\n\r\n \r\n right=fun()\r\n \r\n left=fun1()\r\n \r\n \r\n \r\n return render_template('index.html',righteye = right, lefteye = left)\r\n\r\n\r\[email protected]('/webscraping', methods = ['GET', 'POST'])\r\ndef webscraping():\r\n disease_name=''\r\n if request.method == 'POST':\r\n diseasename = request.form[\"browser\"]\r\n print(diseasename)\r\n print(\"hi\")\r\n print(diseasename) \r\n disease = diseasename \r\n URL = \"https://www.nhs.uk/conditions/\"\r\n r = requests.get(URL) \r\n i=0 \r\n soup = BeautifulSoup(r.content, 'html5lib') \r\n\r\n names = []\r\n link = []\r\n\r\n for item in soup.findAll('a', {'class': 'nhsuk-list-panel__link'}):\r\n names.append(item.get_text(strip=True))\r\n\r\n for item in soup.findAll('li', attrs = {'class':'nhsuk-list-panel__item'}):\r\n link.append(item.a['href'] )\r\n\r\n for j in names:\r\n if j == disease :\r\n print(j)\r\n break\r\n else:\r\n i= i+1\r\n\r\n pandu ='https://www.nhs.uk/'+link[i]\r\n\r\n print(pandu)\r\n URL1 = pandu\r\n r1 = requests.get(URL1) \r\n soup = BeautifulSoup(r1.content, 'html5lib') \r\n table = soup.findAll('section') \r\n\r\n quotes = [] \r\n for row in table: \r\n na = row.get_text() \r\n quotes.append(na) \r\n\r\n pop= ''\r\n for fo in quotes:\r\n pop= pop + fo\r\n\r\n print(pop)\r\n return render_template('index.html', pop= pop)\r\n\r\n\r\napp.config['UPLOAD_FOLDER'] = 'C://Users//Mahesh//Desktop//new env//env//data//pogo'\r\[email protected]('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n filename = secure_filename(f.filename)\r\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n flash('file uploaded successfully')\r\n status = 'file uploaded successfully'\r\n return render_template('index.html', status= status)\r\n\r\n\r\[email protected]('/finder', methods = ['GET', 'POST'])\r\ndef finderpic(): \r\n start = time.time()\r\n #Define Path\r\n model_path = './models/model.h5'\r\n model_weights_path = './models/weights.h5'\r\n test_path = 'data/pogo'\r\n\r\n #Load the pre-trained models\r\n model = load_model(model_path)\r\n model.load_weights(model_weights_path)\r\n\r\n #Define image parameters\r\n img_width, img_height = 150, 150\r\n\r\n #Prediction Function\r\n def predict(file):\r\n x = load_img(file, target_size=(img_width,img_height))\r\n x = img_to_array(x)\r\n x = np.expand_dims(x, axis=0)\r\n array = model.predict(x)\r\n result = array[0]\r\n #print(result)\r\n answer = np.argmax(result)\r\n if answer == 0:\r\n print(\"Predicted: cataract\")\r\n elif answer == 1:\r\n print(\"Predicted:conjunctivities \")\r\n elif answer == 2:\r\n print(\"Predicted: eyelid cyst\")\r\n elif answer == 3:\r\n print(\"Predicted: jaundise\")\r\n\r\n return answer\r\n\r\n #Walk the directory for every image\r\n for i, ret in enumerate(os.walk(test_path)):\r\n for i, filename in enumerate(ret[2]):\r\n if filename.startswith(\".\"):\r\n continue\r\n \r\n print(ret[0] + '/' + filename)\r\n result = predict(ret[0] + '/' + filename)\r\n print(result)\r\n if result == 0:\r\n predict=' cataract'\r\n elif result == 1:\r\n predict= 'conjunctivities' \r\n elif result == 2:\r\n predict= 'eyelid cyst'\r\n elif result == 3:\r\n predict= 'jaundise'\r\n\r\n print(predict)\r\n #Calculate execution time\r\n end = time.time()\r\n dur = end-start\r\n\r\n if dur<60:\r\n print(\"Execution Time:\",dur,\"seconds\")\r\n elif dur>60 and dur<3600:\r\n dur=dur/60\r\n print(\"Execution Time:\",dur,\"minutes\")\r\n else:\r\n 
dur=dur/(60*60)\r\n print(\"Execution Time:\",dur,\"hours\")\r\n\r\n folder_path = (r'C://Users//Mahesh//Desktop//new env//env//data//pogo')\r\n test = os.listdir(folder_path)\r\n for images in test:\r\n if images.endswith(('jpg','jpeg','png')):\r\n os.remove(os.path.join(folder_path, images))\r\n\r\n print(' images deleted')\r\n return render_template('index.html',predict = predict)\r\n \r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.secret_key = 'super secret key'\r\n app.config['SESSION_TYPE'] = 'filesystem'\r\n\r\n sess.init_app(app)\r\n app.run(debug=True)"
] | [
[
"numpy.expand_dims",
"numpy.argmax",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
}
] |
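The scoring step buried at the end of each test loop in eye1.py reduces to one formula: LogMAR = va[i-2] + 0.02 * wrong, where va holds per-line LogMAR values and each missed letter costs 0.02 log units (five letters per line, 0.1 per line). A hedged stand-alone version; the function name is hypothetical, not part of the repo:

def logmar(i: int, wrong: int) -> float:
    # `i` is the 1-based count of chart lines shown when the test stopped,
    # `wrong` the number of letters missed on the last line attempted.
    va = [1.00, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10]
    return va[i - 2] + 0.02 * wrong

print(logmar(3, 2))  # 0.94: stopped on the third line with two misses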
qilei123/AdelaiDet | [
"36f31670c2cc15b11b0367edee2b09d39e764c59"
] | [
"adet/modeling/postprocessing.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport torch\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import paste_masks_in_image\nfrom adet.structures.instances import Instances\nfrom detectron2.utils.memory import retry_if_cuda_oom\n\n\ndef detector_postprocess(results, output_height, output_width, img_cls_pred, mask_threshold=0.5):\n \"\"\"\n Resize the output instances.\n The input images are often resized when entering an object detector.\n As a result, we often need the outputs of the detector in a different\n resolution from its inputs.\n\n This function will resize the raw outputs of an R-CNN detector\n to produce outputs according to the desired output resolution.\n\n Args:\n results (Instances): the raw outputs from the detector.\n `results.image_size` contains the input image resolution the detector sees.\n This object might be modified in-place.\n output_height, output_width: the desired output resolution.\n\n Returns:\n Instances: the resized output from the model, based on the output resolution\n \"\"\"\n\n # Converts integer tensors to float temporaries\n # to ensure true division is performed when\n # computing scale_x and scale_y.\n if isinstance(output_width, torch.Tensor):\n output_width_tmp = output_width.float()\n else:\n output_width_tmp = output_width\n\n if isinstance(output_height, torch.Tensor):\n output_height_tmp = output_height.float()\n else:\n output_height_tmp = output_height\n\n scale_x, scale_y = (\n output_width_tmp / results.image_size[1],\n output_height_tmp / results.image_size[0],\n )\n results = Instances((output_height, output_width), img_cls_pred, **results.get_fields())\n\n if results.has(\"pred_boxes\"):\n output_boxes = results.pred_boxes\n elif results.has(\"proposal_boxes\"):\n output_boxes = results.proposal_boxes\n\n output_boxes.scale(scale_x, scale_y)\n output_boxes.clip(results.image_size)\n\n results = results[output_boxes.nonempty()]\n\n if results.has(\"pred_masks\"):\n results.pred_masks = retry_if_cuda_oom(paste_masks_in_image)(\n results.pred_masks[:, 0, :, :], # N, 1, M, M\n results.pred_boxes,\n results.image_size,\n threshold=mask_threshold,\n )\n\n if results.has(\"pred_keypoints\"):\n results.pred_keypoints[:, :, 0] *= scale_x\n results.pred_keypoints[:, :, 1] *= scale_y\n\n return results\n\n\ndef sem_seg_postprocess(result, img_size, output_height, output_width):\n \"\"\"\n Return semantic segmentation predictions in the original resolution.\n\n The input images are often resized when entering semantic segmentor. Moreover, in same\n cases, they also padded inside segmentor to be divisible by maximum network stride.\n As a result, we often need the predictions of the segmentor in a different\n resolution from its inputs.\n\n Args:\n result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),\n where C is the number of classes, and H, W are the height and width of the prediction.\n img_size (tuple): image size that segmentor is taking as input.\n output_height, output_width: the desired output resolution.\n\n Returns:\n semantic segmentation prediction (Tensor): A tensor of the shape\n (C, output_height, output_width) that contains per-pixel soft predictions.\n \"\"\"\n result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n result = F.interpolate(\n result, size=(output_height, output_width), mode=\"bilinear\", align_corners=False\n )[0]\n return result\n"
] | [
[
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
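sem_seg_postprocess in the row above is a crop-then-resize: slice off the padding added for stride divisibility, then bilinearly upsample the (C, H, W) logits to the original image size. A minimal sketch of the same two lines with concrete shapes (all sizes here are made up for illustration):

import torch
import torch.nn.functional as F

logits = torch.randn(19, 100, 120)   # C x H x W prediction, padded
img_size = (96, 110)                 # valid region the segmentor saw
out_h, out_w = 480, 640              # desired output resolution

result = logits[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(result, size=(out_h, out_w),
                       mode="bilinear", align_corners=False)[0]
assert result.shape == (19, 480, 640)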
wangyidong3/detectron2 | [
"286e6877494353161a99fb26954ef0886ff2d219"
] | [
"tools/plain_train_net.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDetectron2 training script with a plain training loop.\n\nThis scripts reads a given config file and runs the training or evaluation.\nIt is an entry point that is able to train standard models in detectron2.\n\nIn order to let one script support training of many models,\nthis script contains logic that are specific to these built-in models and therefore\nmay not be suitable for your own project.\nFor example, your research project perhaps only needs a single \"evaluator\".\n\nTherefore, we recommend you to use detectron2 as an library and take\nthis file as an example of how to use the library.\nYou may want to write your own script with your datasets and other customizations.\n\nCompared to \"train_net.py\", this script supports fewer default features.\nIt also includes fewer abstraction, therefore is easier to add custom logic.\n\"\"\"\n\nimport logging\nimport os\nfrom collections import OrderedDict\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\n\nimport detectron2.utils.comm as comm\nfrom detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.data import (\n MetadataCatalog,\n build_detection_test_loader,\n build_detection_train_loader,\n)\nfrom detectron2.engine import default_argument_parser, default_setup, launch\nfrom detectron2.evaluation import (\n CityscapesEvaluator,\n COCOEvaluator,\n COCOPanopticEvaluator,\n DatasetEvaluators,\n LVISEvaluator,\n PascalVOCDetectionEvaluator,\n SemSegEvaluator,\n inference_on_dataset,\n print_csv_format,\n)\nfrom detectron2.modeling import build_model\nfrom detectron2.solver import build_lr_scheduler, build_optimizer\nfrom detectron2.utils.events import (\n CommonMetricPrinter,\n EventStorage,\n JSONWriter,\n TensorboardXWriter,\n)\n\nlogger = logging.getLogger(\"detectron2\")\n\n\ndef get_evaluator(cfg, dataset_name, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesEvaluator(dataset_name)\n if evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n if evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(dataset_name, 
evaluator_type)\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)\n\n\ndef do_test(cfg, model):\n results = OrderedDict()\n for dataset_name in cfg.DATASETS.TEST:\n data_loader = build_detection_test_loader(cfg, dataset_name)\n evaluator = get_evaluator(\n cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n )\n results_i = inference_on_dataset(model, data_loader, evaluator)\n results[dataset_name] = results_i\n if comm.is_main_process():\n logger.info(\"Evaluation results for {} in csv format:\".format(dataset_name))\n print_csv_format(results_i)\n if len(results) == 1:\n results = list(results.values())[0]\n return results\n\n\ndef do_train(cfg, model, resume=False):\n model.train()\n optimizer = build_optimizer(cfg, model)\n scheduler = build_lr_scheduler(cfg, optimizer)\n\n checkpointer = DetectionCheckpointer(\n model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler\n )\n start_iter = (\n checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get(\"iteration\", -1) + 1\n )\n max_iter = cfg.SOLVER.MAX_ITER\n\n periodic_checkpointer = PeriodicCheckpointer(\n checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter\n )\n\n writers = (\n [\n CommonMetricPrinter(max_iter),\n JSONWriter(os.path.join(cfg.OUTPUT_DIR, \"metrics.json\")),\n TensorboardXWriter(cfg.OUTPUT_DIR),\n ]\n if comm.is_main_process()\n else []\n )\n\n # compared to \"train_net.py\", we do not support accurate timing and\n # precise BN here, because they are not trivial to implement\n data_loader = build_detection_train_loader(cfg)\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n with EventStorage(start_iter) as storage:\n for data, iteration in zip(data_loader, range(start_iter, max_iter)):\n iteration = iteration + 1\n storage.step()\n\n loss_dict = model(data)\n losses = sum(loss for loss in loss_dict.values())\n assert torch.isfinite(losses).all(), loss_dict\n\n loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n if comm.is_main_process():\n storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)\n\n optimizer.zero_grad()\n losses.backward()\n optimizer.step()\n storage.put_scalar(\"lr\", optimizer.param_groups[0][\"lr\"], smoothing_hint=False)\n scheduler.step()\n\n if (\n cfg.TEST.EVAL_PERIOD > 0\n and iteration % cfg.TEST.EVAL_PERIOD == 0\n and iteration != max_iter\n ):\n do_test(cfg, model)\n # Compared to \"train_net.py\", the test results are not dumped to EventStorage\n comm.synchronize()\n\n if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):\n for writer in writers:\n writer.write()\n periodic_checkpointer.step(iteration)\n\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n\n model = build_model(cfg)\n logger.info(\"Model:\\n{}\".format(model))\n if args.eval_only:\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n return do_test(cfg, model)\n\n distributed = comm.get_world_size() > 1\n if distributed:\n model = DistributedDataParallel(\n model, device_ids=[comm.get_local_rank()], 
broadcast_buffers=False\n )\n\n do_train(cfg, model)\n return do_test(cfg, model)\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n"
] | [
[
"torch.cuda.device_count",
"torch.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
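do_train above follows the standard plain-loop shape: forward to a dict of per-task losses, sum them, check finiteness, then zero_grad/backward/step and step the scheduler once per iteration. A skeleton with the distributed, checkpointing, and event-storage machinery stripped out (the quadratic loss is a stand-in for model(data)):

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

for iteration in range(1, 21):
    x = torch.randn(8, 4)
    loss_dict = {"loss_cls": model(x).pow(2).mean()}
    losses = sum(loss for loss in loss_dict.values())
    assert torch.isfinite(losses).all(), loss_dict

    optimizer.zero_grad()
    losses.backward()
    optimizer.step()
    scheduler.step()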
potassco/xorro | [
"6ed499ac1608cf1d1d1b82b632d5961ee1bd8439",
"6ed499ac1608cf1d1d1b82b632d5961ee1bd8439"
] | [
"xorro/tests/gje_test.py",
"xorro/gje_prop_n.py"
] | [
"\"\"\"\nGauss-Jordan Tests Suite\n\"\"\"\nimport xorro\nfrom xorro import gje\nfrom xorro import gje_simplex as simplex\nimport numpy as np\n\ndef cols_state_to_matrix(state):\n ## Parse columns state to matrix\n return gje.columns_state_to_matrix(state)\n\ndef get_clause(m,lits):\n ## Deduce clause after GJE\n return gje.deduce_clause(m,lits)\n\ndef xor_columns(col,parity):\n ## XOR parity column with parity column\n return gje.xor_columns(col,parity)\n\ndef swap_row(m,i,j):\n ## Swap Rows m[i] with m[j]\n return gje.swap(m,i,j)\n\ndef xor_row(m,i,j):\n ## XOR Rows m[i] with m[j]\n return gje.xor(m,i,j)\n \ndef remove_rows_zeros(m):\n ## Remove rows with all zeros including the augmented column\n matrix = gje.remove_rows_zeros(m)\n return matrix\n\ndef check_sat(m):\n ## Check SAT\n return gje.check_sat(m)\n \ndef solve_gje(m, show):\n ## If there are more than unary xors perform GJE\n if len(m[0]) > 2:\n m = gje.remove_rows_zeros(m)\n m = gje.perform_gauss_jordan_elimination(m, show)\n return m\n\ndef solve_gje_(m, show):\n ## If there are more than unary xors perform GJE\n if len(m[0]) > 2:\n m = gje.remove_rows_zeros(m)\n m = np.array([np.array(xi) for xi in m])\n m = gje.perform_gauss_jordan_elimination_(m, show)\n return m\n \n\n\n\"\"\"\nGauss-Jordan Exclusive Tests\nParse the columns state to a binary matrix and return the list of literals\n\"\"\"\ndef test_columns_state_to_matrix(self):\n self.assertEqual(cols_state_to_matrix(\n {'parity': [0, 1, 1, 0, 0], 2: [1, 0, 0, 1, 0], 3: [0, 0, 0, 0, 1], 4: [1, 1, 0, 0, 0], 5: [0, 1, 0, 0, 0], 6: [1, 1, 0, 0, 0], 7: [0, 0, 1, 0, 1], 8: [0, 0, 1, 0, 0], 9: [0, 0, 0, 1, 0], 10: [0, 0, 0, 1, 0]}),\n ([[1,0,1,0,1,0,0,0,0,0],\n [0,0,1,1,1,0,0,0,0,1],\n [0,0,0,0,0,1,1,0,0,1],\n [1,0,0,0,0,0,0,1,1,0],\n [0,1,0,0,0,1,0,0,0,0]],[2,3,4,5,6,7,8,9,10]))\n\n \n\"\"\"\nDeduce clause after Gauss-Jordan Elimination\n\"\"\"\ndef test_get_clauses(self):\n self.assertEqual(get_clause([[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]], [2,3,4]), [-2])\n\n self.assertEqual(get_clause([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 1]], [2,3,4]), [-2,-3,4])\n\n self.assertEqual(get_clause([[1, 0, 1, 1, 0, 1],\n [0, 1, 1, 0, 0, 0],\n [1, 0, 1, 1, 1, 0]], [2,3,4,5,6]), [])\n\n\"\"\"\nXOR a single column with Parity column Tests\n\"\"\"\ndef test_xor_columns(self):\n self.assertEqual(xor_columns([1, 0],[1, 0]),[0, 0])\n\n self.assertEqual(xor_columns([0, 0, 0, 0, 0],[1, 1, 1, 1, 1]),[1, 1, 1, 1, 1])\n\n self.assertEqual(xor_columns([0, 1, 0, 1],[1, 0, 1, 0]),[1, 1, 1, 1])\n\n \n\"\"\"\nSwap Rows Tests\n\"\"\"\ndef test_swap_rows(self):\n self.assertEqual(swap_row([[1, 0, 1, 1, 1, 1],\n [1, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 0, 1]], 1, 2),[[1, 0, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 1],\n [1, 1, 0, 1, 0, 1]])\n\n self.assertEqual(swap_row([[0, 0],\n [1, 1]], 1, 0),[[1, 1],\n [0, 0]])\n\n self.assertEqual(swap_row([[0, 1],\n [1, 0]], 1, 0),[[1, 0],\n [0, 1]])\n\n\"\"\"\nXOR Rows Tests\n\"\"\"\ndef test_xor_rows(self):\n self.assertEqual(xor_row([[1, 0],\n [1, 1],\n [1, 0]], 0, 1),[[1, 0],\n [0, 1],\n [1, 0]])\n\n self.assertEqual(xor_row([[0, 0],\n [1, 1]], 1, 0),[[1, 1],\n [1, 1]])\n\n self.assertEqual(xor_row([[0, 0],\n [0, 0]], 1, 0),[[0, 0],\n [0, 0]])\n\n\"\"\" \nPre GJE... 
Remove Rows if they are all zeros\n\"\"\"\n## Remove Rows full of Zeros \ndef test_remove_zeros(self):\n self.assertEqual(remove_rows_zeros([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]),\n [[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1]])\n\n self.assertEqual(remove_rows_zeros([[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 0]]),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1]])\n\n self.assertEqual(remove_rows_zeros([[0, 1, 1],\n [1, 0, 0],\n [0, 0, 0]]),\n [[0, 1, 1],\n [1, 0, 0]])\n\n\n\"\"\" \nCheck Satisfiability/Conflict wrt the augmented column. \nReturn True if conflict (It must exist an empty odd equation)\n\"\"\"\n## Check SATISFIABILITY\ndef test_check_sat(self):\n self.assertEqual(check_sat([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]),True)\n\n self.assertEqual(check_sat([[1, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0]]),False)\n\n self.assertEqual(check_sat([[1, 0, 1],\n [0, 1, 0],\n [0, 0, 1]]),True)\n\n self.assertEqual(check_sat([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]]),False)\n\n\n\"\"\"\nGauss-Jordan Elimination Tests\n\nThe second parameter in the solve function is a flag.\nIf True, it will display the GJ Elimination Procedure\n\n\"\"\"\n\n## No GJE due matrix size. Return the same matrix to check SAT\ndef test_no_gje(self):\n self.assertEqual(solve_gje([[1, 0],\n [1, 1],\n [1, 0]],False),\n [[1, 0],\n [1, 1],\n [1, 0]])\n\n self.assertEqual(solve_gje([[1, 0],\n [0, 1]],False),\n [[1, 0],\n [0, 1]])\n \n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0],\n [1, 1],\n [1, 0]],False),\n [[1, 0],\n [1, 1],\n [1, 0]])\n\n self.assertEqual(solve_gje_([[1, 0],\n [0, 1]],False),\n [[1, 0],\n [0, 1]])\n\n## More Columns than Rows\ndef test_more_cols(self):\n self.assertEqual(solve_gje([[0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0],\n [1, 0, 0, 1, 0]],False),\n [[1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n\n self.assertEqual(solve_gje([[0, 1, 1, 0],\n [0, 1, 1, 0],\n [1, 0, 0, 0]],False),\n [[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0]])\n\n self.assertEqual(solve_gje([[0, 1, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1],\n [0, 1, 0, 1, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False),\n [[1, 0, 0, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[0, 1, 1, 0, 0],\n [0, 1, 1, 0, 0],\n [1, 0, 0, 1, 0]],False).tolist(),\n [[1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0]])\n\n self.assertEqual(solve_gje_([[0, 1, 1, 0],\n [0, 1, 1, 0],\n [1, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 
1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0]])\n\n self.assertEqual(solve_gje_([[0, 1, 0, 0, 0, 0, 0, 1],\n [0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1],\n [0, 0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 1, 1, 0, 0],\n [1, 1, 1, 0, 0, 0, 1, 1],\n [0, 0, 1, 0, 1, 0, 0, 1],\n [0, 1, 0, 1, 0, 1, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 0, 1, 0, 1],\n [0, 1, 0, 0, 0, 1, 1, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1]])\n\n## Square Matrix\ndef test_square(self):\n self.assertEqual(solve_gje([[1, 0, 1, 0, 1, 0],\n [1, 1, 1, 0, 0, 1],\n [0, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0]],False),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0]],False),\n [[1, 0, 0, 0, 1],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 0, 1],\n [0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 1, 1, 1],\n [1, 0, 1, 0],\n [0, 0, 1, 0]],False),\n [[1, 0, 0, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0]])\n\n self.assertEqual(solve_gje([[0, 0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [1, 0, 0, 1, 0, 1]],False),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje([[1, 1, 1],\n [1, 0, 1]],False),\n [[1, 0, 1],\n [0, 1, 0]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0, 1, 0, 1, 0],\n [1, 1, 1, 0, 0, 1],\n [0, 0, 1, 0, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0]],False).tolist(),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 0, 1, 1, 1],\n [1, 0, 1, 0, 0],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 1, 0]],False).tolist(),\n [[1, 0, 0, 0, 1],\n [0, 1, 0, 0, 1],\n [0, 0, 1, 0, 1],\n [0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 1, 1, 1],\n [1, 0, 1, 0],\n [0, 0, 1, 0]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0]])\n\n self.assertEqual(solve_gje_([[0, 0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0, 1],\n [1, 0, 1, 1, 1, 1],\n [0, 1, 0, 1, 0, 0],\n [1, 0, 0, 1, 0, 1]],False).tolist(),\n [[1, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]])\n\n self.assertEqual(solve_gje_([[1, 1, 1],\n [1, 0, 1]],False).tolist(),\n [[1, 0, 1],\n [0, 1, 0]])\n \n\n\n## More Rows than Columns\ndef test_more_rows(self):\n self.assertEqual(solve_gje([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 1]],False),\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n self.assertEqual(solve_gje([[0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 1, 0]],False),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 1]])\n\n self.assertEqual(solve_gje([[0, 1, 1],\n [1, 0, 0],\n [0, 0, 0]],False),\n [[1, 0, 0],\n [0, 1, 1]])\n\n # solve_gje_\n self.assertEqual(solve_gje_([[1, 0, 1, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 
1, 0, 1]],False).tolist(),\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1]])\n\n self.assertEqual(solve_gje_([[0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 1, 0]],False).tolist(),\n [[1, 0, 0],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 1]])\n\n self.assertEqual(solve_gje_([[0, 1, 1],\n [1, 0, 0]],False).tolist(),\n [[1, 0, 0],\n [0, 1, 1]])\n\n\ndef test_incremental_reduce(self):\n mm = simplex.Matrix([[1, 0, 0, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(0,0),\n ([1],[2]))\n\n\n mm = simplex.Matrix([[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(2,2),\n ([1],[0]))\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 1],\n [0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0]])\n self.assertEqual(mm.__reduce__(4,0),\n ([1,2],[]))\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 0]])\n self.assertEqual(mm.__reduce__(5,1),\n ([2],[0]))\n\n\ndef test_remove_row(self):\n mm = simplex.Matrix([[1, 0, 0, 1],\n [1, 1, 1, 1],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([1, 0, 0, 1]),\n [[1, 1, 1, 1],\n [0, 0, 1, 0]])\n\n\n mm = simplex.Matrix([[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 1, 1, 0]),\n [[1, 0, 0, 1],\n [0, 0, 1, 0]])\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 1],\n [0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 1, 1, 1, 1, 0]),\n [[1, 0, 0, 1, 1, 1],\n [0, 0, 1, 1, 1, 0]])\n\n \n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 0]])\n self.assertEqual(mm.__remove_row__([0, 0, 1, 1, 0, 1, 0]),\n [[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0]])\n\ndef test_remove_col(self):\n mm = simplex.Matrix([[1, 0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1, 0]])\n self.assertEqual(mm.__remove_col__(2),\n [[1, 0, 1, 1, 0, 1],\n [0, 1, 0, 1, 1, 0]])\n",
"from . import util\nfrom . import gje\nfrom itertools import chain\nimport clingo\nimport numpy as np\n\nclass List:\n \"\"\"\n All columns of the state are built in this class, the parity column and the literals\n \"\"\"\n def __init__(self, ls):\n self.__list = ls\n\n def __len__(self):\n return len(self.__list)\n\n def __getitem__(self, idx):\n return self.__list[idx]\n \nclass XOR:\n \"\"\"\n A XOR constraint maintains the following invariants:\n 1. there are at least two literals, and\n 2. the first two literals are unassigned, or all literals are assigned and\n the first two literals have been assigned last on the same decision\n level.\n Furthermore, an index pointing to the literal after the literal assigned\n last is maintained. We start the search for the next unassigned literal\n from this point. This is important to get the amortized linear propagation\n time.\n \"\"\"\n def __init__(self, literals):\n assert(len(literals) >= 2)\n self.__literals = literals\n self.__index = 2\n\n def __len__(self):\n return len(self.__literals)\n\n def __getitem__(self, idx):\n return self.__literals[idx]\n\n def __setitem__(self, idx, val):\n self.__literals[idx] = val\n return val \n\n def propagate(self, assignment, i):\n \"\"\"\n Propagates the given assigned index.\n\n If an unwatched unassigned literal is found, the literals are\n rearranged so that the given index points to it. The function returns\n true if an such a literal is found.\n \"\"\"\n assert(i < 2)\n for j in chain(range(self.__index, len(self)), range(2, self.__index)):\n if assignment.value(self[j]) is None:\n self.__index = j + 1 if j + 1 < len(self) else 2\n self[i], self[j] = self[j], self[i]\n return True\n return False\n\n def reason_gje(self, columns, assignment, cutoff):\n state = {}\n parities = columns[\"parity\"]\n partial = []\n\n ## Get Partial Assignment\n for key, value in columns.items():\n if key != \"parity\":\n assign = assignment.value(key)\n if assign == None:\n state[key] = value\n elif assign == True:\n parities = gje.xor_(value._List__list, parities)\n partial.append( key)\n elif assign == False:\n partial.append(-key)\n \n ## Build the matrix from columns state\n xor_lits = [ lit for lit in state ]\n m = [ col._List__list for col in state.values()]\n m.append(parities)\n matrix = np.array(m).T\n\n ## Check SATISFIABILITY and find consequences\n conflict, clause = gje.check_sat_(matrix, xor_lits)\n ## Detect conflict or clauses before GJE\n if conflict:\n return conflict, partial, clause\n \n ## If there are more than unary xors perform GJE\n if len(matrix[0]) > 2 and len(parities)>1:\n matrix = gje.remove_rows_zeros_(matrix)\n matrix = gje.perform_gauss_jordan_elimination_(matrix, False)\n\n ## Check SATISFIABILITY and find consequences\n conflict, clause = gje.check_sat_(matrix, xor_lits)\n\n return conflict, partial, clause\n\n\nclass State_GJE:\n def __init__(self, cutoff):\n self.__states = []\n self.__columns = []\n self.__literals = []\n self.__sat = True\n self.__consequences = []\n self.__cutoff = cutoff\n\n def __add_watch(self, ctl, xor, unassigned, thread_ids):\n \"\"\"\n Adds a watch for the for the given index.\n\n The literal at the given index has to be either unassigned or become\n unassigned through backtracking before the associated constraint can\n become unit resulting again.\n \"\"\"\n variable = abs(xor[unassigned])\n ctl.add_watch( variable)\n ctl.add_watch(-variable)\n for thread_id in thread_ids:\n self.__states[thread_id].setdefault(variable, []).append((xor, 
unassigned))\n\n def init(self, init):\n \"\"\"\n Initializes xor constraints based on the symbol table to build a binary matrix.\n This propagator is called on fixpoints to perform Gauss-Jordan Elimination after Unit Propagation\n \"\"\"\n for thread_id in range(len(self.__states), init.number_of_threads):\n self.__states.append({})\n self.__columns.append({})\n\n init.check_mode = clingo.PropagatorCheckMode.Fixpoint\n\n ## Get the constraints\n ret = util.symbols_to_xor_r(init.symbolic_atoms, util.default_get_lit(init))\n \n if ret is None:\n self.__sat = False\n elif ret is not None:\n # NOTE: whether facts should be handled here is up to question\n # this should only be necessary if the propagator is to be used standalone\n # without any of the other approaches\n constraints, facts = ret\n self.__consequences.extend(facts)\n\n ## Get the literals and parities\n pars = []\n literals = []\n for constraint in constraints:\n # Consequences\n if len(constraint) == 1:\n lit = next(iter(constraint))\n self.__consequences.append(lit if constraint[0] > 1 else -lit)\n # Watch XOR\n elif len(constraint):\n xor = XOR(constraint)\n self.__add_watch(init, xor, 0, range(init.number_of_threads))\n self.__add_watch(init, xor, 1, range(init.number_of_threads))\n # Get literals\n for lit in constraint:\n value = init.assignment.value(lit)\n if value == None and abs(lit) not in literals:\n literals.append(abs(lit))\n \n # FIXME: check if there is another way to do this. All constraints are represented as \"odd\" constraints but GJE only uses non-negative variables/literals.\n # Somehow we need to convert xor constraints with a negative into a positive literal and invert the parity to build the matrix.\n # Get parities\n if constraint[0] < 0:\n pars.append(0)\n else:\n pars.append(1)\n\n # Sort literals\n self.__literals = List(sorted(literals))\n \n # Add parities to the state\n for thread_id in range(init.number_of_threads):\n self.__columns[thread_id][\"parity\"] = List(np.array(pars))\n \n # Build the rest of the matrix\n matrix = []\n for constraint in constraints:\n matrix.append(gje.lits_to_binary_(constraint, literals))\n\n # Transpose\n matrix = np.array(matrix).T\n for thread_id in range(init.number_of_threads):\n for i in range(len(literals)):\n self.__columns[thread_id][literals[i]] = List(matrix[i])\n \n else:\n # NOTE: if the propagator is to be used standalone, this case has to be handled\n pass\n \n\n def check(self, control):\n \"\"\"\n Check if current assignment is conflict-free, detect a conflict or deduce literals\n by doing Gauss-Jordan Elimination\n \"\"\"\n \"\"\"\n Since the XOR constraint above handles only constraints with at least\n two literals, here the other two cases are handled.\n\n Empty conflicting constraints result in top-level conflicts and unit\n constraints will be propagated on the top-level.\n \"\"\"\n if not self.__sat:\n control.add_clause([]) and control.propagate()\n return\n for lit in self.__consequences:\n if not control.add_clause([lit]) or not control.propagate():\n return\n\n def propagate(self, control, changes):\n \"\"\"\n Propagates XOR constraints maintaining two watches per constraint.\n\n Generated conflicts are guaranteed to be asserting (have at least two\n literals from the current decision level).\n \"\"\"\n state = self.__states[control.thread_id]\n columns = self.__columns[control.thread_id]\n cutoff = self.__cutoff\n \n for literal in changes:\n variable = abs(literal)\n state[variable], watches = [], state[variable]\n assert(len(watches) > 
0)\n for i in range(len(watches)):\n xor, unassigned = watches[i]\n if xor.propagate(control.assignment, unassigned):\n # We found an unassigned literal, which is watched next.\n self.__add_watch(control, xor, unassigned, (control.thread_id,))\n else:\n # Here the constraint is either unit, satisfied, or\n # conflicting. In any case, we can keep the watch because\n # (*) the current decision level has to be backtracked\n # before the constraint can become unit again.\n state[variable].append((xor, unassigned)) \n \n ## GJE\n conflict, partial, clause = xor.reason_gje(columns, control.assignment, cutoff)\n if conflict:\n if not control.add_nogood(partial):\n return\n elif clause:\n for lit in clause:\n if not control.add_nogood(partial+[-lit]):\n return \n \n if len(state[variable]) == 0:\n control.remove_watch( variable)\n control.remove_watch(-variable)\n state.pop(variable)\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
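The XOR propagator quoted in the row above delegates its core linear algebra to a `gje` helper module whose source the row does not include. As a hedged sketch of the underlying technique only — Gauss-Jordan elimination over GF(2) on an augmented parity matrix, where adding one row to another is a bitwise XOR and an all-zero coefficient row with parity 1 signals an unsatisfiable XOR system — the following self-contained numpy snippet shows the idea; `gf2_eliminate` is an illustrative name, not the repository's API.

    import numpy as np

    def gf2_eliminate(matrix):
        """Reduce an augmented binary matrix (last column holds the parity
        bits) toward reduced row-echelon form over GF(2), where adding one
        row to another is a bitwise XOR."""
        m = np.array(matrix, dtype=np.uint8) % 2
        rows, cols = m.shape
        pivot = 0
        for col in range(cols - 1):  # skip the parity column
            hits = np.nonzero(m[pivot:, col])[0]
            if hits.size == 0:
                continue  # no pivot available in this column
            r = pivot + hits[0]
            m[[pivot, r]] = m[[r, pivot]]  # move the pivot row into place
            for other in range(rows):  # clear this column everywhere else
                if other != pivot and m[other, col]:
                    m[other] ^= m[pivot]
            pivot += 1
            if pivot == rows:
                break
        return m

    # x1^x2=1, x2^x3=0, x1^x3=0 is unsatisfiable: elimination yields the
    # conflict row [0 0 0 | 1].
    print(gf2_eliminate([[1, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0]]))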
sIncerass/nums | [
"57c4d8f67c31c6215dea1ede07e8c0f063c68a6b"
] | [
"nums/core/array/blockarray.py"
] | [
"# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport itertools\n\nimport numpy as np\n\nfrom nums.core.array import selection\nfrom nums.core.array import utils as array_utils\nfrom nums.core.array.base import BlockArrayBase, Block\nfrom nums.core.array.view import ArrayView\nfrom nums.core.grid.grid import ArrayGrid\nfrom nums.core.compute.compute_manager import ComputeManager\n\n\nclass BlockArray(BlockArrayBase):\n @classmethod\n def empty(cls, shape, block_shape, dtype, cm: ComputeManager):\n grid = ArrayGrid(shape=shape, block_shape=block_shape, dtype=dtype.__name__)\n grid_meta = grid.to_meta()\n arr = BlockArray(grid, cm)\n for grid_entry in grid.get_entry_iterator():\n arr.blocks[grid_entry].oid = cm.empty(\n grid_entry,\n grid_meta,\n syskwargs={\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape},\n )\n return arr\n\n @classmethod\n def from_scalar(cls, val, cm):\n if not array_utils.is_scalar(val):\n raise ValueError(\"%s is not a scalar.\" % val)\n return BlockArray.from_np(np.array(val), block_shape=(), copy=False, cm=cm)\n\n @classmethod\n def from_oid(cls, oid, shape, dtype, cm):\n block_shape = shape\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n ba = BlockArray(grid, cm)\n for i, grid_entry in enumerate(grid.get_entry_iterator()):\n assert i == 0\n ba.blocks[grid_entry].oid = oid\n return ba\n\n @classmethod\n def from_np(cls, arr, block_shape, copy, cm):\n dtype_str = str(arr.dtype)\n grid = ArrayGrid(arr.shape, block_shape, dtype_str)\n rarr = BlockArray(grid, cm)\n grid_entry_iterator = grid.get_entry_iterator()\n for grid_entry in grid_entry_iterator:\n grid_slice = grid.get_slice(grid_entry)\n block = arr[grid_slice]\n if copy:\n block = np.copy(block)\n rarr.blocks[grid_entry].oid = cm.put(block)\n rarr.blocks[grid_entry].dtype = getattr(np, dtype_str)\n return rarr\n\n @classmethod\n def from_blocks(cls, arr: np.ndarray, result_shape, cm):\n sample_idx = tuple(0 for dim in arr.shape)\n if isinstance(arr, Block):\n sample_block = arr\n result_shape = ()\n else:\n sample_block = arr[sample_idx]\n if result_shape is None:\n result_shape = array_utils.shape_from_block_array(arr)\n result_block_shape = sample_block.shape\n result_dtype_str = sample_block.dtype.__name__\n result_grid = ArrayGrid(\n shape=result_shape, block_shape=result_block_shape, dtype=result_dtype_str\n )\n assert arr.shape == result_grid.grid_shape\n result = BlockArray(result_grid, cm)\n for grid_entry in result_grid.get_entry_iterator():\n if isinstance(arr, Block):\n block: Block = arr\n else:\n block: Block = arr[grid_entry]\n result.blocks[grid_entry] = block\n return result\n\n def copy(self):\n grid_copy = self.grid.from_meta(self.grid.to_meta())\n rarr_copy = BlockArray(grid_copy, self.cm)\n for grid_entry in grid_copy.get_entry_iterator():\n rarr_copy.blocks[grid_entry] = self.blocks[grid_entry].copy()\n return rarr_copy\n\n def touch(self):\n \"\"\"\n \"Touch\" an array. 
This is an efficient distributed \"wait\" operation.\n \"\"\"\n oids = []\n for grid_entry in self.grid.get_entry_iterator():\n block: Block = self.blocks[grid_entry]\n oids.append(\n self.cm.touch(\n block.oid,\n syskwargs={\n \"grid_entry\": block.grid_entry,\n \"grid_shape\": block.grid_shape,\n },\n )\n )\n self.cm.get(oids)\n return self\n\n def reshape(self, *shape, **kwargs):\n block_shape = kwargs.get(\"block_shape\", None)\n if array_utils.is_int(shape):\n shape = (shape,)\n elif len(shape) == 0:\n shape = self.shape\n elif isinstance(shape[0], (tuple, list)):\n assert len(shape) == 1\n shape = shape[0]\n else:\n assert all(np.issubdtype(type(n), int) for n in shape)\n shape = Reshape.compute_shape(self.shape, shape)\n if block_shape is None:\n if shape == self.shape:\n # This is a noop.\n block_shape = self.block_shape\n else:\n block_shape = self.cm.get_block_shape(shape, self.dtype)\n return Reshape()(self, shape, block_shape)\n\n def expand_dims(self, axis):\n \"\"\"\n This function refers to the numpy implementation of expand_dims.\n \"\"\"\n if type(axis) not in (tuple, list):\n axis = (axis,)\n out_ndim = len(axis) + self.ndim\n axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim)\n\n shape_it = iter(self.shape)\n block_shape_it = iter(self.block_shape)\n shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]\n block_shape = [\n 1 if ax in axis else next(block_shape_it) for ax in range(out_ndim)\n ]\n return self.reshape(shape, block_shape=block_shape)\n\n def squeeze(self):\n shape = self.shape\n block_shape = self.block_shape\n new_shape = []\n new_block_shape = []\n for s, b in zip(shape, block_shape):\n if s == 1:\n assert b == 1\n continue\n new_shape.append(s)\n new_block_shape.append(b)\n return self.reshape(new_shape, block_shape=new_block_shape)\n\n def swapaxes(self, axis1, axis2):\n meta_swap = self.grid.to_meta()\n shape = list(meta_swap[\"shape\"])\n block_shape = list(meta_swap[\"block_shape\"])\n dim = len(shape)\n if axis1 >= dim or axis2 >= dim:\n raise ValueError(\"axis is larger than the array dimension\")\n shape[axis1], shape[axis2] = shape[axis2], shape[axis1]\n block_shape[axis1], block_shape[axis2] = block_shape[axis2], block_shape[axis1]\n meta_swap[\"shape\"] = tuple(shape)\n meta_swap[\"block_shape\"] = tuple(block_shape)\n grid_swap = ArrayGrid.from_meta(meta_swap)\n rarr_src = np.ndarray(self.blocks.shape, dtype=\"O\")\n\n for grid_entry in self.grid.get_entry_iterator():\n rarr_src[grid_entry] = self.blocks[grid_entry].swapaxes(axis1, axis2)\n rarr_src = rarr_src.swapaxes(axis1, axis2)\n\n rarr_swap = BlockArray(grid_swap, self.cm, rarr_src)\n return rarr_swap\n\n def __getattr__(self, item):\n if item == \"__array_priority__\" or item == \"__array_struct__\":\n # This is triggered by a numpy array on the LHS.\n raise ValueError(\"Unable to covert numpy array to block array.\")\n elif item == \"ndim\":\n return len(self.shape)\n elif item == \"T\":\n metaT = self.grid.to_meta()\n metaT[\"shape\"] = tuple(reversed(metaT[\"shape\"]))\n metaT[\"block_shape\"] = tuple(reversed(metaT[\"block_shape\"]))\n gridT = ArrayGrid.from_meta(metaT)\n rarrT = BlockArray(gridT, self.cm)\n rarrT.blocks = np.copy(self.blocks.T)\n for grid_entry in rarrT.grid.get_entry_iterator():\n rarrT.blocks[grid_entry] = rarrT.blocks[grid_entry].transpose()\n return rarrT\n else:\n raise NotImplementedError(item)\n\n def __getitem__(self, item):\n if not isinstance(item, tuple):\n ss = (item,)\n else:\n ss = item\n # We need to fetch any block 
arrays.\n tmp = []\n for entry in ss:\n if isinstance(entry, BlockArray):\n tmp.append(entry.get())\n else:\n tmp.append(entry)\n ss = tuple(tmp)\n is_handled_advanced = True\n if len(ss) > 1:\n # Check if all entries are full slices except the last entry.\n for entry in ss[:-1]:\n is_handled_advanced = is_handled_advanced and (\n isinstance(entry, slice)\n and entry.start is None\n and entry.stop is None\n )\n if is_handled_advanced and array_utils.is_array_like(ss[-1]):\n # Treat this as a shuffle.\n return self._advanced_single_array_subscript(\n sel=(ss[-1],), axis=len(ss) - 1\n )\n\n av: ArrayView = ArrayView.from_block_array(self)\n # TODO (hme): We don't have to create, but do so for now until we need to optimize.\n return av[item].create(BlockArray)\n\n def _advanced_single_array_subscript(self, sel: tuple, block_size=None, axis=0):\n def group_by_block(\n dst_grid_entry,\n dst_slice_tuples,\n src_grid,\n dst_index_list,\n src_index_list,\n axis=0,\n ):\n # Block grid entries needed to write to given dst_slice_selection.\n src_blocks = {}\n dst_slice_np = np.array(dst_slice_tuples).T\n dst_index_arr = np.array(dst_index_list)\n src_index_arr = np.array(src_index_list)\n # Pick the smallest type to represent indices.\n # A set of these indices may be transmitted over the network,\n # so we want to pick the smallest encoding possible.\n index_types = [\n (2 ** 8, np.uint8),\n (2 ** 16, np.uint16),\n (2 ** 32, np.uint32),\n (2 ** 64, np.uint64),\n ]\n index_type = None\n for bound, curr_index_type in index_types:\n if np.all(np.array(src_grid.block_shape[axis]) < bound) and np.all(\n dst_slice_np[1][axis] < bound\n ):\n index_type = curr_index_type\n break\n if index_type is None:\n raise Exception(\"Unable to encode block indices, blocks are too large.\")\n dst_entry_test = list(dst_grid_entry[:axis]) + list(\n dst_grid_entry[axis + 1 :]\n )\n num_pairs_check = 0\n for grid_entry in src_grid.get_entry_iterator():\n # Must match on every entry except axis.\n src_entry_test = list(grid_entry[:axis]) + list(grid_entry[axis + 1 :])\n if dst_entry_test != src_entry_test:\n # Skip this block.\n continue\n src_slice_np = np.array(src_grid.get_slice_tuples(grid_entry)).T\n index_pairs = []\n for i in range(src_index_arr.shape[0]):\n src_index = src_index_arr[i]\n dst_index = dst_index_arr[i]\n if np.all(\n (src_slice_np[0][axis] <= src_index)\n & (src_index < src_slice_np[1][axis])\n ):\n index_pair = (\n np.array(\n dst_index - dst_slice_np[0][axis], dtype=index_type\n ),\n np.array(\n src_index - src_slice_np[0][axis], dtype=index_type\n ),\n )\n index_pairs.append(index_pair)\n num_pairs_check += 1\n if len(index_pairs) > 0:\n src_blocks[grid_entry] = index_pairs\n assert num_pairs_check == len(dst_index_list)\n return src_blocks\n\n array = sel[0]\n assert len(array.shape) == 1\n assert np.all(0 <= array) and np.all(array < self.shape[axis])\n if block_size is None:\n block_size = self.block_shape[axis]\n axis_dim = len(array)\n shape = tuple(\n list(self.shape[:axis]) + [axis_dim] + list(self.shape[axis + 1 :])\n )\n block_shape = tuple(\n list(self.block_shape[:axis])\n + [block_size]\n + list(self.block_shape[axis + 1 :])\n )\n dst_arr = BlockArray.empty(\n shape=shape, block_shape=block_shape, dtype=self.dtype, cm=self.cm\n )\n\n for dst_grid_entry in dst_arr.grid.get_entry_iterator():\n dst_block: Block = dst_arr.blocks[dst_grid_entry]\n dst_slice_selection = dst_arr.grid.get_slice(dst_grid_entry)\n dst_index_array = selection.slice_to_range(\n dst_slice_selection[axis], 
shape[axis]\n )\n src_index_array = array[dst_slice_selection[axis]]\n assert len(dst_index_array) == len(src_index_array)\n # Can this be sped up by grouping all src blocks outside of this loop?\n src_blocks = group_by_block(\n dst_grid_entry,\n dst_arr.grid.get_slice_tuples(dst_grid_entry),\n self.grid,\n dst_index_array,\n src_index_array,\n axis,\n )\n for src_grid_entry in src_blocks:\n src_block: Block = self.blocks[src_grid_entry]\n index_pairs = src_blocks[src_grid_entry]\n syskwargs = {\n \"grid_entry\": dst_grid_entry,\n \"grid_shape\": dst_arr.grid.grid_shape,\n }\n dst_block.oid = self.cm.update_block_along_axis(\n dst_block.oid, src_block.oid, index_pairs, axis, syskwargs=syskwargs\n )\n return dst_arr\n\n def __setitem__(self, key, value):\n av: ArrayView = ArrayView.from_block_array(self)\n av[key] = value\n\n @staticmethod\n def to_block_array(obj, cm: ComputeManager, block_shape=None):\n if isinstance(obj, BlockArray):\n return obj\n if isinstance(obj, np.ndarray):\n np_array = obj\n elif isinstance(obj, list):\n np_array = np.array(obj)\n elif array_utils.is_scalar(obj):\n return BlockArray.from_scalar(obj, cm)\n else:\n raise Exception(\"Unsupported type %s\" % type(obj))\n if block_shape is None:\n block_shape = cm.get_block_shape(np_array.shape, np_array.dtype)\n return BlockArray.from_np(np_array, block_shape, False, cm)\n\n def check_or_convert_other(self, other, compute_block_shape=False):\n block_shape = None if compute_block_shape else self.block_shape\n return BlockArray.to_block_array(other, self.cm, block_shape=block_shape)\n\n def ufunc(self, op_name):\n result = self.copy()\n for grid_entry in self.grid.get_entry_iterator():\n result.blocks[grid_entry] = self.blocks[grid_entry].ufunc(op_name)\n return result\n\n def _tree_reduce(\n self, op_name, blocks_or_oids, result_grid_entry, result_grid_shape\n ):\n \"\"\"\n Basic tree reduce imp.\n Schedules op on same node as left operand.\n :param op_name: The reduction op.\n :param blocks_or_oids: A list of type Block or a list of tuples.\n Tuples must be of the form\n (oid, grid_entry, grid_shape, transposed)\n :param result_grid_entry: The grid entry of the result block. This will be used\n to compute the final reduction step.\n :param result_grid_shape: The grid entry of the result block. 
This will be used\n to compute the final reduction step.\n :return: The oid of the result.\n \"\"\"\n oid_list = blocks_or_oids\n if isinstance(blocks_or_oids[0], Block):\n oid_list = [\n (b.oid, b.grid_entry, b.grid_shape, b.transposed)\n for b in blocks_or_oids\n ]\n if len(oid_list) == 1:\n return oid_list[0][0]\n q = oid_list\n while len(q) > 1:\n a_oid, a_ge, a_gs, a_T = q.pop(0)\n b_oid, _, _, b_T = q.pop(0)\n ge, gs = (\n (result_grid_entry, result_grid_shape) if len(q) == 0 else (a_ge, a_gs)\n )\n c_oid = self.cm.bop_reduce(\n op_name,\n a_oid,\n b_oid,\n a_T,\n b_T,\n syskwargs={\n \"grid_entry\": ge,\n \"grid_shape\": gs,\n },\n )\n q.append((c_oid, ge, gs, False))\n r_oid, r_ge, r_gs, _ = q.pop(0)\n assert r_ge == result_grid_entry\n assert r_gs == result_grid_shape\n return r_oid\n\n def reduce_axis(self, op_name, axis, keepdims=False):\n if not (axis is None or isinstance(axis, (int, np.int32, np.int64))):\n raise NotImplementedError(\"Only integer axis is currently supported.\")\n block_reduced_oids = np.empty_like(self.blocks, dtype=tuple)\n for grid_entry in self.grid.get_entry_iterator():\n block = self.blocks[grid_entry]\n block_oid = self.cm.reduce_axis(\n op_name=op_name,\n arr=block.oid,\n axis=axis,\n keepdims=keepdims,\n transposed=block.transposed,\n syskwargs={\n \"grid_entry\": block.grid_entry,\n \"grid_shape\": block.grid_shape,\n },\n )\n block_reduced_oids[grid_entry] = (\n block_oid,\n block.grid_entry,\n block.grid_shape,\n False,\n )\n result_shape = []\n result_block_shape = []\n for curr_axis in range(len(self.shape)):\n axis_size, axis_block_size = (\n self.shape[curr_axis],\n self.block_shape[curr_axis],\n )\n if curr_axis == axis or axis is None:\n if keepdims:\n axis_size, axis_block_size = 1, 1\n else:\n continue\n result_shape.append(axis_size)\n result_block_shape.append(axis_block_size)\n result_shape = tuple(result_shape)\n result_block_shape = tuple(result_block_shape)\n result_dtype = array_utils.get_reduce_output_type(op_name, self.dtype)\n result_grid = ArrayGrid(\n shape=result_shape,\n block_shape=result_block_shape,\n dtype=result_dtype.__name__,\n )\n result = BlockArray(result_grid, self.cm)\n\n if axis is None:\n if result.shape == ():\n result_block: Block = result.blocks[()]\n else:\n result_block: Block = result.blocks[:].item()\n result_block.oid = self._tree_reduce(\n op_name,\n block_reduced_oids.flatten().tolist(),\n result_block.grid_entry,\n result_block.grid_shape,\n )\n else:\n for result_grid_entry in result_grid.get_entry_iterator():\n block_reduced_oids_axis = []\n for sum_dim in range(self.grid.grid_shape[axis]):\n grid_entry = list(result_grid_entry)\n if keepdims:\n grid_entry[axis] = sum_dim\n else:\n grid_entry = grid_entry[:axis] + [sum_dim] + grid_entry[axis:]\n grid_entry = tuple(grid_entry)\n block_reduced_oids_axis.append(block_reduced_oids[grid_entry])\n result_block: Block = result.blocks[result_grid_entry]\n result_block.oid = self._tree_reduce(\n op_name,\n block_reduced_oids_axis,\n result_block.grid_entry,\n result_block.grid_shape,\n )\n return result\n\n def __matmul__(self, other):\n if len(self.shape) > 2:\n # TODO (bcp): NumPy's implementation does a stacked matmul, which is not supported yet.\n raise NotImplementedError(\n \"Matrix multiply for tensors of rank > 2 not supported yet.\"\n )\n else:\n return self.tensordot(other, 1)\n\n def _compute_tensordot_syskwargs(self, self_block: Block, other_block: Block):\n # Schedule on larger block.\n if np.product(self_block.shape) >= 
np.product(other_block.shape):\n return self_block.true_grid_entry(), self_block.true_grid_shape()\n else:\n return other_block.true_grid_entry(), other_block.true_grid_shape()\n\n def tensordot(self, other, axes=2):\n if not isinstance(other, BlockArray):\n raise ValueError(\n \"Cannot automatically construct BlockArray for tensor operations.\"\n )\n\n if isinstance(axes, int):\n pass\n elif array_utils.is_array_like(axes):\n raise NotImplementedError(\"Non-integer axes is currently not supported.\")\n else:\n raise TypeError(f\"Unexpected axes type '{type(axes).__name__}'\")\n\n if array_utils.np_tensordot_param_test(\n self.shape, self.ndim, other.shape, other.ndim, axes\n ):\n raise ValueError(\"shape-mismatch for sum\")\n\n other = self.check_or_convert_other(other, compute_block_shape=True)\n\n this_axes = self.grid.grid_shape[:-axes]\n this_sum_axes = self.grid.grid_shape[-axes:]\n other_axes = other.grid.grid_shape[axes:]\n other_sum_axes = other.grid.grid_shape[:axes]\n assert this_sum_axes == other_sum_axes\n result_shape = tuple(self.shape[:-axes] + other.shape[axes:])\n result_block_shape = tuple(self.block_shape[:-axes] + other.block_shape[axes:])\n result_grid = ArrayGrid(\n shape=result_shape,\n block_shape=result_block_shape,\n dtype=array_utils.get_bop_output_type(\n \"tensordot\", self.dtype, other.dtype\n ).__name__,\n )\n assert result_grid.grid_shape == tuple(this_axes + other_axes)\n result = BlockArray(result_grid, self.cm)\n this_dims = list(itertools.product(*map(range, this_axes)))\n other_dims = list(itertools.product(*map(range, other_axes)))\n sum_dims = list(itertools.product(*map(range, this_sum_axes)))\n for i in this_dims:\n for j in other_dims:\n grid_entry = tuple(i + j)\n result_block: Block = result.blocks[grid_entry]\n sum_oids = []\n for k in sum_dims:\n self_block: Block = self.blocks[tuple(i + k)]\n other_block: Block = other.blocks[tuple(k + j)]\n dot_grid_args = self._compute_tensordot_syskwargs(\n self_block, other_block\n )\n dotted_oid = self.cm.bop(\n \"tensordot\",\n self_block.oid,\n other_block.oid,\n self_block.transposed,\n other_block.transposed,\n axes=axes,\n syskwargs={\n \"grid_entry\": dot_grid_args[0],\n \"grid_shape\": dot_grid_args[1],\n },\n )\n sum_oids.append(\n (dotted_oid, dot_grid_args[0], dot_grid_args[1], False)\n )\n result_block.oid = self._tree_reduce(\n \"sum\", sum_oids, result_block.grid_entry, result_block.grid_shape\n )\n return result\n\n def _fast_element_wise(self, op_name, other):\n \"\"\"\n Implements fast scheduling for basic element-wise operations.\n \"\"\"\n # Schedule the op first.\n blocks = np.empty(shape=self.grid.grid_shape, dtype=Block)\n for grid_entry in self.grid.get_entry_iterator():\n self_block: Block = self.blocks[grid_entry]\n other_block: Block = other.blocks[grid_entry]\n blocks[grid_entry] = block = Block(\n grid_entry=grid_entry,\n grid_shape=self_block.grid_shape,\n rect=self_block.rect,\n shape=self_block.shape,\n dtype=self_block.dtype,\n transposed=False,\n cm=self.cm,\n )\n block.oid = self.cm.bop(\n op_name,\n self_block.oid,\n other_block.oid,\n self_block.transposed,\n other_block.transposed,\n axes={},\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": self.grid.grid_shape,\n },\n )\n return BlockArray(self.grid.copy(), self.cm, blocks=blocks)\n\n def __add__(self, other):\n other = self.check_or_convert_other(other)\n if self.shape == other.shape:\n return self._fast_element_wise(\"add\", other)\n return BlockArray.from_blocks(\n self.blocks + other.blocks, 
result_shape=None, cm=self.cm\n )\n\n def __sub__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks - other.blocks, result_shape=None, cm=self.cm\n )\n\n def __mul__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks * other.blocks, result_shape=None, cm=self.cm\n )\n\n def __truediv__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks / other.blocks, result_shape=None, cm=self.cm\n )\n\n def __pow__(self, other):\n other = self.check_or_convert_other(other)\n return BlockArray.from_blocks(\n self.blocks ** other.blocks, result_shape=None, cm=self.cm\n )\n\n def __invert__(self):\n return self.ufunc(\"invert\")\n\n __iadd__ = __add__\n __isub__ = __sub__\n __imul__ = __mul__\n __imatmul__ = __matmul__\n __itruediv__ = __truediv__\n __ipow__ = __pow__\n\n # TODO (hme): Type check bool ops.\n def __bool__(self):\n # pylint: disable=no-member\n dtype = self.dtype\n if isinstance(dtype, type):\n # TODO (hme): Fix this strange issue.\n dtype = dtype()\n if isinstance(dtype, (bool, np.bool)) and np.sum(self.shape) == len(self.shape):\n return self.get().__bool__()\n return True\n\n def __inequality__(self, op, other):\n other = self.check_or_convert_other(other)\n assert (\n other.shape == () or other.shape == self.shape\n ), \"Currently supports comparison with scalars only.\"\n shape = array_utils.broadcast(self.shape, other.shape).shape\n block_shape = array_utils.broadcast_block_shape(\n self.shape, other.shape, self.block_shape\n )\n dtype = bool.__name__\n grid = ArrayGrid(shape, block_shape, dtype)\n result = BlockArray(grid, self.cm)\n for grid_entry in result.grid.get_entry_iterator():\n if other.shape == ():\n other_block: Block = other.blocks.item()\n else:\n other_block: Block = other.blocks[grid_entry]\n result.blocks[grid_entry] = self.blocks[grid_entry].bop(\n op, other_block, args={}\n )\n\n return result\n\n def __ge__(self, other):\n return self.__inequality__(\"ge\", other)\n\n def __gt__(self, other):\n return self.__inequality__(\"gt\", other)\n\n def __le__(self, other):\n return self.__inequality__(\"le\", other)\n\n def __lt__(self, other):\n return self.__inequality__(\"lt\", other)\n\n def __eq__(self, other):\n return self.__inequality__(\"eq\", other)\n\n def __ne__(self, other):\n return self.__inequality__(\"ne\", other)\n\n __radd__ = __add__\n\n def __rsub__(self, other):\n other = self.check_or_convert_other(other)\n return other - self\n\n __rmul__ = __mul__\n\n def __rmatmul__(self, other):\n other = self.check_or_convert_other(other)\n return other @ self\n\n def __rtruediv__(self, other):\n other = self.check_or_convert_other(other)\n return other / self\n\n def __rpow__(self, other):\n other = self.check_or_convert_other(other)\n return other ** self\n\n def __neg__(self):\n return -1 * self\n\n def __pos__(self):\n return self\n\n def astype(self, dtype):\n grid = ArrayGrid(self.shape, self.block_shape, dtype.__name__)\n result = BlockArray(grid, self.cm)\n for grid_entry in result.grid.get_entry_iterator():\n result.blocks[grid_entry] = self.blocks[grid_entry].astype(dtype)\n return result\n\n def flattened_oids(self):\n oids = []\n for grid_entry in self.grid.get_entry_iterator():\n oid = self.blocks[grid_entry].oid\n oids.append(oid)\n return oids\n\n\nclass Reshape(object):\n @staticmethod\n def compute_shape(shape, input_shape):\n size = np.product(shape)\n if -1 in input_shape:\n 
new_shape = []\n other_dim_prod = 1\n negative_one_seen = False\n for dim in input_shape:\n if dim == -1:\n if negative_one_seen:\n raise Exception(\"Only one -1 permitted in reshape.\")\n negative_one_seen = True\n continue\n other_dim_prod *= dim\n if size % other_dim_prod != 0:\n raise Exception(\"Invalid shape.\")\n for dim in input_shape:\n if dim == -1:\n new_shape.append(size // other_dim_prod)\n else:\n new_shape.append(dim)\n else:\n new_shape = input_shape\n assert size == np.product(new_shape)\n return new_shape\n\n def _group_index_lists_by_block(\n self, dst_slice_tuples, src_grid: ArrayGrid, dst_index_list, src_index_list\n ):\n # TODO(hme): Keep this function here until it's needed for greater support of\n # selection/assignment operations.\n # Block grid entries needed to write to given dst_slice_selection.\n src_blocks = {}\n dst_slice_np = np.array(dst_slice_tuples).T\n dst_index_arr = np.array(dst_index_list)\n src_index_arr = np.array(src_index_list)\n # Pick the smallest type to represent indices.\n # A set of these indices may be transmitted over the network,\n # so we want to pick the smallest encoding possible.\n index_types = [\n (2 ** 8, np.uint8),\n (2 ** 16, np.uint16),\n (2 ** 32, np.uint32),\n (2 ** 64, np.uint64),\n ]\n index_type = None\n for bound, curr_index_type in index_types:\n if np.all(np.array(src_grid.block_shape) < bound) and np.all(\n dst_slice_np[1] < bound\n ):\n index_type = curr_index_type\n break\n if index_type is None:\n raise Exception(\"Unable to encode block indices, blocks are too large.\")\n for grid_entry in src_grid.get_entry_iterator():\n src_slice_np = np.array(src_grid.get_slice_tuples(grid_entry)).T\n index_pairs = []\n for i in range(src_index_arr.shape[0]):\n src_index = src_index_arr[i]\n dst_index = dst_index_arr[i]\n if np.all(\n (src_slice_np[0] <= src_index) & (src_index < src_slice_np[1])\n ):\n index_pair = (\n (dst_index - dst_slice_np[0]).astype(index_type),\n (src_index - src_slice_np[0]).astype(index_type),\n )\n index_pairs.append(index_pair)\n if len(index_pairs) > 0:\n src_blocks[grid_entry] = index_pairs\n return src_blocks\n\n def _arbitrary_reshape(self, arr: BlockArray, shape, block_shape) -> BlockArray:\n # This is the worst-case scenario.\n # Generate index mappings per block, and group source indices to minimize\n # RPCs and generation of new objects.\n cm = arr.cm\n dst_arr = BlockArray.empty(\n shape=shape, block_shape=block_shape, dtype=arr.dtype, cm=cm\n )\n for dst_grid_entry in dst_arr.grid.get_entry_iterator():\n dst_block: Block = dst_arr.blocks[dst_grid_entry]\n dst_slice_selection = dst_arr.grid.get_slice(dst_grid_entry)\n dst_index_list = array_utils.slice_sel_to_index_list(dst_slice_selection)\n src_index_list = array_utils.translate_index_list(\n dst_index_list, shape, arr.shape\n )\n src_blocks = self._group_index_lists_by_block(\n dst_arr.grid.get_slice_tuples(dst_grid_entry),\n arr.grid,\n dst_index_list,\n src_index_list,\n )\n for src_grid_entry in src_blocks:\n src_block: Block = arr.blocks[src_grid_entry]\n index_pairs = src_blocks[src_grid_entry]\n syskwargs = {\n \"grid_entry\": dst_grid_entry,\n \"grid_shape\": dst_arr.grid.grid_shape,\n }\n dst_block.oid = cm.update_block_by_index(\n dst_block.oid, src_block.oid, index_pairs, syskwargs=syskwargs\n )\n return dst_arr\n\n def _block_shape_reshape(self, arr, block_shape):\n rarr: BlockArray = BlockArray.empty(arr.shape, block_shape, arr.dtype, arr.cm)\n for grid_entry in rarr.grid.get_entry_iterator():\n grid_entry_slice = 
rarr.grid.get_slice(grid_entry)\n # TODO (hme): This could be less costly.\n rarr[grid_entry_slice] = arr[grid_entry_slice]\n return rarr\n\n def _strip_ones(self, shape):\n return tuple(filter(lambda x: x != 1, shape))\n\n def _is_simple_reshape(self, arr: BlockArray, shape, block_shape):\n # Is the reshape a difference of factors of 1?\n # Strip out 1s and compare.\n return self._strip_ones(shape) == self._strip_ones(\n arr.shape\n ) and self._strip_ones(block_shape) == self._strip_ones(arr.block_shape)\n\n def _simple_reshape(self, arr, shape, block_shape):\n # Reshape the array of blocks only.\n # This is only used when the difference in shape are factors of 1s,\n # and the ordering of other factors are maintained.\n\n # Check assumptions.\n assert len(self._strip_ones(arr.shape)) == len(self._strip_ones(shape))\n\n # Create new grid, and perform reshape on blocks\n # to simplify access to source blocks.\n grid = ArrayGrid(shape, block_shape, dtype=arr.dtype.__name__)\n src_blocks = arr.blocks.reshape(grid.grid_shape)\n rarr = BlockArray(grid, arr.cm)\n for grid_entry in grid.get_entry_iterator():\n src_block: Block = src_blocks[grid_entry]\n dst_block: Block = rarr.blocks[grid_entry]\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n dst_block.oid = arr.cm.reshape(\n src_block.oid, dst_block.shape, syskwargs=syskwargs\n )\n return rarr\n\n def _validate(self, arr, shape, block_shape):\n assert -1 not in shape\n assert -1 not in block_shape\n assert len(shape) == len(block_shape)\n assert np.product(arr.shape) == np.product(shape)\n\n def __call__(self, arr: BlockArray, shape, block_shape):\n self._validate(arr, shape, block_shape)\n if arr.shape == shape and arr.block_shape == block_shape:\n return arr\n elif self._is_simple_reshape(arr, shape, block_shape):\n return self._simple_reshape(arr, shape, block_shape)\n elif arr.shape == shape and arr.block_shape != block_shape:\n return self._block_shape_reshape(arr, block_shape)\n elif arr.shape != shape and arr.block_shape == block_shape:\n # Just do full reshape for this case as well.\n # Though there may be a better solution, we generally expect\n # the block shape to change with array shape.\n return self._arbitrary_reshape(arr, shape, block_shape)\n else:\n assert arr.shape != shape and arr.block_shape != block_shape\n return self._arbitrary_reshape(arr, shape, block_shape)\n"
] | [
[
"numpy.product",
"numpy.core.numeric.normalize_axis_tuple",
"numpy.empty_like",
"numpy.ndarray",
"numpy.all",
"numpy.copy",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
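`BlockArray._tree_reduce` in the row above folds per-block partial results pairwise through a queue until a single object id remains, scheduling each `bop_reduce` call on the cluster. A single-process sketch of that reduction pattern with the scheduling and grid metadata stripped out (`tree_reduce` is an illustrative name, not the library's API):

    import numpy as np

    def tree_reduce(op, items):
        """Repeatedly pop two partial results off the front of the queue,
        combine them, and push the combination onto the back, mirroring the
        queue discipline of BlockArray._tree_reduce."""
        q = list(items)
        if not q:
            raise ValueError("need at least one partial result")
        while len(q) > 1:
            a = q.pop(0)
            b = q.pop(0)
            q.append(op(a, b))
        return q[0]

    blocks = [np.arange(4), np.arange(4, 8), np.arange(8, 12)]
    total = tree_reduce(np.add, blocks)
    print(total)        # [12 15 18 21], the element-wise sum of the blocks
    print(total.sum())  # 66, same as np.arange(12).sum()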
WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | [
"6c3031487cd1447b7f5362483c14b108177387bb",
"6c3031487cd1447b7f5362483c14b108177387bb",
"6c3031487cd1447b7f5362483c14b108177387bb"
] | [
"tf_wide_deep_377_table_2048/python/train.py",
"unused/tf2_wide_deep/python/lib/build_estimator.py",
"unused/py3_tf2_wide_deep/python_v2/lib/utils/image_preprocessing.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: lapis-hong\n# @Date : 2018/1/15\n\"\"\"Training Wide and Deep Model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport time\n\nimport tensorflow as tf\n\nfrom lib.read_conf import Config\nfrom lib.dataset import input_fn\nfrom lib.build_estimator import build_estimator, build_custom_estimator\nfrom lib.utils.util import elapse_time, list_files\n\nCONFIG = Config().train\nparser = argparse.ArgumentParser(description='Train Wide and Deep Model.')\n\nparser.add_argument(\n '--model_dir', type=str, default=CONFIG[\"model_dir\"],\n help='Base directory for the model.')\nparser.add_argument(\n '--model_type', type=str, default=CONFIG[\"model_type\"],\n help=\"Valid model types: {'wide', 'deep', 'wide_deep'}.\")\nparser.add_argument(\n '--train_epochs', type=int, default=CONFIG[\"train_epochs\"],\n help='Number of training epochs.')\nparser.add_argument(\n '--epochs_per_eval', type=int, default=CONFIG[\"epochs_per_eval\"],\n help='The number of training epochs to run between evaluations.')\nparser.add_argument(\n '--batch_size', type=int, default=CONFIG[\"batch_size\"],\n help='Number of examples per batch.')\nparser.add_argument(\n '--train_data', type=str, default=CONFIG[\"train_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--eval_data', type=str, default=CONFIG[\"eval_data\"],\n help='Path to the validation data.')\nparser.add_argument(\n '--pred_data', type=str, default=CONFIG[\"pred_data\"],\n help='Path to the validation data.')\nparser.add_argument(\n '--test_data', type=str, default=CONFIG[\"test_data\"],\n help='Path to the test data.')\nparser.add_argument(\n '--image_train_data', type=str, default=CONFIG[\"image_train_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--image_eval_data', type=str, default=CONFIG[\"image_eval_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--image_test_data', type=str, default=CONFIG[\"image_test_data\"],\n help='Path to the train data.')\nparser.add_argument(\n '--keep_train', type=int, default=CONFIG[\"keep_train\"],\n help='Whether to keep training on previous trained model.')\n\n\ndef train_and_eval(model):\n for n in range(FLAGS.train_epochs):\n tf.logging.info('=' * 30 + ' START EPOCH {} '.format(n + 1) + '=' * 30 + '\\n')\n train_data_list = list_files(FLAGS.train_data) # dir to file list\n for f in train_data_list:\n t0 = time.time()\n tf.logging.info('<EPOCH {}>: Start training {}'.format(n + 1, f))\n model.train(\n input_fn=lambda: input_fn(f, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('<EPOCH {}>: Finish training {}, take {} mins'.format(n + 1, f, elapse_time(t0)))\n print('-' * 80)\n tf.logging.info('<EPOCH {}>: Start evaluating {}'.format(n + 1, FLAGS.eval_data))\n t0 = time.time()\n results = model.evaluate(\n input_fn=lambda: input_fn(FLAGS.eval_data, FLAGS.image_eval_data, 'eval', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('<EPOCH {}>: Finish evaluation {}, take {} mins'.format(n + 1, FLAGS.eval_data, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: 
{}'.format(key, results[key]))\n # every epochs_per_eval test the model (use larger test dataset)\n if (n+1) % FLAGS.epochs_per_eval == 0:\n tf.logging.info('<EPOCH {}>: Start testing {}'.format(n + 1, FLAGS.test_data))\n results = model.evaluate(\n input_fn=lambda: input_fn(FLAGS.test_data, FLAGS.image_test_data, 'pred', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # If None, the latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('<EPOCH {}>: Finish testing {}, take {} mins'.format(n + 1, FLAGS.test_data, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: {}'.format(key, results[key]))\n\n\ndef dynamic_train(model):\n \"\"\"Dynamic train mode.\n For example:\n train_data_files: [0301, 0302, 0303, ...]\n train mode:\n first take 0301 as train data, 0302 as test data;\n then keep training take 0302 as train data, 0303 as test data ...\n \"\"\"\n data_files = list_files(FLAGS.train_data)\n data_files.sort()\n assert len(data_files) > 1, 'Dynamic train mode need more than 1 data file'\n\n for i in range(len(data_files)-1):\n train_data = data_files[i]\n test_data = data_files[i+1]\n tf.logging.info('=' * 30 + ' START TRAINING DATA: {} '.format(train_data) + '=' * 30 + '\\n')\n for n in range(FLAGS.train_epochs):\n t0 = time.time()\n tf.logging.info('START TRAIN DATA <{}> <EPOCH {}>'.format(train_data, n + 1))\n model.train(\n input_fn=lambda: input_fn(train_data, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('FINISH TRAIN DATA <{}> <EPOCH {}> take {} mins'.format(train_data, n + 1, elapse_time(t0)))\n print('-' * 80)\n tf.logging.info('START EVALUATE TEST DATA <{}> <EPOCH {}>'.format(test_data, n + 1))\n t0 = time.time()\n results = model.evaluate(\n input_fn=lambda: input_fn(test_data, FLAGS.image_eval_data, 'eval', FLAGS.batch_size),\n steps=None, # Number of steps for which to evaluate model.\n hooks=None,\n checkpoint_path=None, # latest checkpoint in model_dir is used.\n name=None)\n tf.logging.info('FINISH EVALUATE TEST DATA <{}> <EPOCH {}>: take {} mins'.format(test_data, n + 1, elapse_time(t0)))\n print('-' * 80)\n # Display evaluation metrics\n for key in sorted(results):\n print('{}: {}'.format(key, results[key]))\n\n\ndef train(model):\n for n in range(FLAGS.train_epochs):\n tf.logging.info('=' * 30 + ' START EPOCH {} '.format(n + 1) + '=' * 30 + '\\n')\n train_data_list = list_files(FLAGS.train_data) # dir to file list\n for f in train_data_list:\n t0 = time.time()\n tf.logging.info('<EPOCH {}>: Start training {}'.format(n + 1, f))\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n model.train(\n input_fn=lambda: input_fn(f, FLAGS.image_train_data, 'train', FLAGS.batch_size),\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None)\n tf.logging.info('<EPOCH {}>: Finish training {}, take {} mins'.format(n + 1, f, elapse_time(t0)))\n\n\ndef train_and_eval_api(model):\n train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(FLAGS.train_data, FLAGS.image_train_data, FLAGS.batch_size), max_steps=10000)\n eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(FLAGS.eval_data, FLAGS.image_eval_data, FLAGS.batch_size))\n tf.estimator.train_and_evaluate(model, train_spec, eval_spec)\n\n\ndef main(unused_argv):\n CONFIG = Config()\n print(\"Using TensorFlow Version %s\" % tf.__version__)\n # assert 
\"1.4\" <= tf.__version__, \"Need TensorFlow r1.4 or Later.\"\n print('\\nModel Type: {}'.format(FLAGS.model_type))\n model_dir = os.path.join(FLAGS.model_dir, FLAGS.model_type)\n print('\\nModel Directory: {}'.format(model_dir))\n\n print(\"\\nUsing Train Config:\")\n for k, v in CONFIG.train.items():\n print('{}: {}'.format(k, v))\n print(\"\\nUsing Model Config:\")\n for k, v in CONFIG.model.items():\n print('{}: {}'.format(k, v))\n\n if not FLAGS.keep_train:\n # Clean up the model directory if not keep training\n shutil.rmtree(model_dir, ignore_errors=True)\n print('Remove model directory: {}'.format(model_dir))\n # model = build_estimator(model_dir, FLAGS.model_type)\n model = build_custom_estimator(model_dir, FLAGS.model_type)\n tf.logging.info('Build estimator: {}'.format(model))\n\n if CONFIG.train['dynamic_train']:\n train_fn = dynamic_train\n print(\"Using dynamic train mode.\")\n else:\n train_fn = train_and_eval\n\n if CONFIG.distribution[\"is_distribution\"]:\n print(\"Using PID: {}\".format(os.getpid()))\n cluster = CONFIG.distribution[\"cluster\"]\n job_name = CONFIG.distribution[\"job_name\"]\n task_index = CONFIG.distribution[\"task_index\"]\n print(\"Using Distributed TensorFlow. Local host: {} Job_name: {} Task_index: {}\"\n .format(cluster[job_name][task_index], job_name, task_index))\n cluster = tf.train.ClusterSpec(CONFIG.distribution[\"cluster\"])\n server = tf.train.Server(cluster,\n job_name=job_name,\n task_index=task_index)\n # distributed can not including eval.\n train_fn = train\n if job_name == 'ps':\n # wait for incoming connection forever\n server.join()\n # sess = tf.Session(server.target)\n # queue = create_done_queue(task_index, num_workers)\n # for i in range(num_workers):\n # sess.run(queue.dequeue())\n # print(\"ps {} received worker {} done\".format(task_index, i)\n # print(\"ps {} quitting\".format(task_index))\n else: # TODO:supervisor & MonotoredTrainingSession & experiment (deprecated)\n train_fn(model)\n # train_and_eval(model)\n # Each worker only needs to contact the PS task(s) and the local worker task.\n # config = tf.ConfigProto(device_filters=[\n # '/job:ps', '/job:worker/task:%d' % arguments.task_index])\n # with tf.device(tf.train.replica_device_setter(\n # worker_device=\"/job:worker/task:%d\" % task_index,\n # cluster=cluster)):\n # e = _create_experiment_fn()\n # e.train_and_evaluate() # call estimator's train() and evaluate() method\n # hooks = [tf.train.StopAtStepHook(last_step=10000)]\n # with tf.train.MonitoredTrainingSession(\n # master=server.target,\n # is_chief=(task_index == 0),\n # checkpoint_dir=args.model_dir,\n # hooks=hooks) as mon_sess:\n # while not mon_sess.should_stop():\n # # mon_sess.run()\n # classifier.fit(input_fn=train_input_fn, steps=1)\n else:\n # local run\n train_fn(model)\n\n\nif __name__ == '__main__':\n # Set to INFO for tracking training, default is WARN. ERROR for least messages\n tf.logging.set_verbosity(tf.logging.INFO)\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: lapis-hong\n# @Date : 2018/1/15\n\"\"\"\nBuild feature columns using tf.feature_column API.\nBuild estimator using tf.estimator API and custom API (defined in lib module)\nUse function `build_estimator` to use official classifier\nUse function `build_costum_estimator` to use custom classifier.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\n# fix ImportError: No mudule named lib.*\nimport sys\nPACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, PACKAGE_DIR)\n\nfrom lib.read_conf import Config\nfrom lib.utils.model_util import activation_fn\nfrom lib.joint import WideAndDeepClassifier\n\n\n# wide columns\ncategorical_column_with_identity = tf.feature_column.categorical_column_with_identity\ncategorical_column_with_hash_bucket = tf.feature_column.categorical_column_with_hash_bucket\ncategorical_column_with_vocabulary_list = tf.feature_column.categorical_column_with_vocabulary_list\ncrossed_column = tf.feature_column.crossed_column\nbucketized_column = tf.feature_column.bucketized_column\n# deep columns\nembedding_column = tf.feature_column.embedding_column\nindicator_column = tf.feature_column.indicator_column\nnumeric_column = tf.feature_column.numeric_column\n\nCONF = Config()\nif CONF.train['pos_sample_loss_weight'] is None and CONF.train['neg_sample_loss_weight'] is None:\n weight_column = None\nelse:\n weight_column = 'weight_column'\n\n\ndef _build_model_columns():\n \"\"\"\n Build wide and deep feature columns from custom feature conf using tf.feature_column API\n wide_columns: category features + cross_features + [discretized continuous features]\n deep_columns: continuous features + category features(onehot or embedding for sparse features) + [cross_features(embedding)]\n Return: \n _CategoricalColumn and __DenseColumn instance in tf.feature_column API\n \"\"\"\n def embedding_dim(dim):\n \"\"\"empirical embedding dim\"\"\"\n return int(np.power(2, np.ceil(np.log(dim**0.25))))\n\n def normalizer_fn_builder(scaler, normalization_params):\n \"\"\"normalizer_fn builder\"\"\"\n if scaler == 'min_max':\n return lambda x: (x-normalization_params[0]) / (normalization_params[1]-normalization_params[0])\n elif scaler == 'standard':\n return lambda x: (x-normalization_params[0]) / normalization_params[1]\n else:\n return lambda x: tf.math.log(x)\n\n feature_conf_dic = CONF.read_feature_conf()\n cross_feature_list = CONF.read_cross_feature_conf()\n tf.compat.v1.logging.info('Total used feature class: {}'.format(len(feature_conf_dic)))\n tf.compat.v1.logging.info('Total used cross feature class: {}'.format(len(cross_feature_list)))\n\n wide_columns = []\n deep_columns = []\n wide_dim = 0\n deep_dim = 0\n feature_num = 0\n for feature, conf in feature_conf_dic.items():\n f_type, f_tran, f_param = conf[\"type\"], conf[\"transform\"], conf[\"parameter\"]\n feature_num += 1\n if f_type == 'category':\n\n if f_tran == 'hash_bucket':\n hash_bucket_size = f_param\n embed_dim = embedding_dim(hash_bucket_size)\n col = categorical_column_with_hash_bucket(feature,\n hash_bucket_size=hash_bucket_size,\n dtype=tf.string)\n wide_columns.append(col)\n deep_columns.append(embedding_column(col,\n dimension=embed_dim,\n combiner='mean',\n initializer=None,\n ckpt_to_load_from=None,\n tensor_name_in_ckpt=None,\n max_norm=None,\n trainable=True))\n wide_dim += 
hash_bucket_size\n deep_dim += embed_dim\n\n elif f_tran == 'vocab':\n col = categorical_column_with_vocabulary_list(feature,\n vocabulary_list=map(str, f_param),\n dtype=None,\n default_value=-1,\n num_oov_buckets=0) # len(vocab)+num_oov_buckets\n wide_columns.append(col)\n deep_columns.append(indicator_column(col))\n wide_dim += len(f_param)\n deep_dim += len(f_param)\n\n elif f_tran == 'identity':\n num_buckets = f_param\n col = categorical_column_with_identity(feature,\n num_buckets=num_buckets,\n default_value=0) # Values outside range will result in default_value if specified, otherwise it will fail.\n wide_columns.append(col)\n deep_columns.append(indicator_column(col))\n wide_dim += num_buckets\n deep_dim += num_buckets\n else:\n normalizaton, boundaries = f_param[\"normalization\"], f_param[\"boundaries\"]\n if f_tran is None:\n normalizer_fn = None\n else:\n normalizer_fn = normalizer_fn_builder(f_tran, tuple(normalizaton))\n col = numeric_column(feature,\n shape=(1,),\n default_value=0, # default None will fail if an example does not contain this column.\n dtype=tf.float32,\n normalizer_fn=normalizer_fn)\n if boundaries: # whether include continuous features in wide part\n wide_columns.append(bucketized_column(col, boundaries=boundaries))\n wide_dim += int(len(boundaries)+1)\n deep_columns.append(col)\n deep_dim += 1\n\n cross_feature_num = 0\n for cross_features, hash_bucket_size, is_deep in cross_feature_list:\n cf_list = []\n cross_feature_num += 1\n for f in cross_features:\n f_type = feature_conf_dic[f][\"type\"]\n f_tran = feature_conf_dic[f][\"transform\"]\n f_param = feature_conf_dic[f][\"parameter\"]\n if f_type == 'continuous':\n cf_list.append(bucketized_column(numeric_column(f, default_value=0), boundaries=f_param['boundaries']))\n else:\n if f_tran == 'identity':\n # If an input feature is of numeric type, you can use categorical_column_with_identity\n cf_list.append(categorical_column_with_identity(f, num_buckets=f_param,\n default_value=0))\n else:\n cf_list.append(f) # category col put the name in crossed_column\n col = crossed_column(cf_list, hash_bucket_size)\n wide_columns.append(col)\n wide_dim += hash_bucket_size\n wide_dim = int(wide_dim)\n if is_deep:\n deep_columns.append(embedding_column(col, dimension=embedding_dim(hash_bucket_size)))\n deep_dim += embedding_dim(hash_bucket_size)\n\n print(\"feature_num: {}\\ncross_feature_num: {}\\ntotal_feature_num: {}\".format(\n feature_num, cross_feature_num, feature_num + cross_feature_num))\n\n # add columns logging info\n tf.compat.v1.logging.info('Build total {} wide columns'.format(len(wide_columns)))\n for col in wide_columns:\n tf.compat.v1.logging.debug('Wide columns: {}'.format(col))\n tf.compat.v1.logging.info('Build total {} deep columns'.format(len(deep_columns)))\n for col in deep_columns :\n tf.compat.v1.logging.debug('Deep columns: {}'.format(col))\n tf.compat.v1.logging.info('Wide input dimension is: {}'.format(wide_dim))\n tf.compat.v1.logging.info('Deep input dimension is: {}'.format(deep_dim))\n\n return wide_columns, deep_columns\n\n\ndef _build_distribution():\n \"\"\"Build distribution configuration variable TF_CONFIG in tf.estimator API\"\"\"\n TF_CONFIG = CONF.distribution\n if TF_CONFIG[\"is_distribution\"]:\n cluster_spec = TF_CONFIG[\"cluster\"]\n job_name = TF_CONFIG[\"job_name\"]\n task_index = TF_CONFIG[\"task_index\"]\n os.environ['TF_CONFIG'] = json.dumps(\n {'cluster': cluster_spec,\n 'task': {'type': job_name, 'index': task_index}})\n run_config = tf.estimator.RunConfig()\n if 
job_name in [\"ps\", \"chief\", \"worker\"]:\n assert run_config.master == 'grpc://' + cluster_spec[job_name][task_index] # grpc://10.120.180.212\n assert run_config.task_type == job_name\n assert run_config.task_id == task_index\n assert run_config.num_ps_replicas == len(cluster_spec[\"ps\"])\n assert run_config.num_worker_replicas == len(cluster_spec[\"worker\"]) + len(cluster_spec[\"chief\"])\n assert run_config.is_chief == (job_name == \"chief\")\n elif job_name == \"evaluator\":\n assert run_config.master == ''\n assert run_config.evaluator_master == ''\n assert run_config.task_id == 0\n assert run_config.num_ps_replicas == 0\n assert run_config.num_worker_replicas == 0\n assert run_config.cluster_spec == {}\n assert run_config.task_type == 'evaluator'\n assert not run_config.is_chief\n\n\ndef build_estimator(model_dir, model_type):\n \"\"\"Build an estimator using official tf.estimator API.\n Args:\n model_dir: model save base directory\n model_type: one of {`wide`, `deep`, `wide_deep`}\n Returns:\n model instance of tf.estimator.Estimator class\n \"\"\"\n wide_columns, deep_columns = _build_model_columns()\n _build_distribution()\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig(**CONF.runconfig).replace(\n session_config=tf.compat.v1.ConfigProto(device_count={'GPU': 0}))\n\n if model_type == 'wide':\n return tf.compat.v1.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=wide_columns,\n weight_column=weight_column,\n optimizer=tf.compat.v1.train.FtrlOptimizer( # can not read from conf\n learning_rate=0.1,\n l1_regularization_strength=0.5,\n l2_regularization_strength=1),\n partitioner=None,\n config=run_config, loss_reduction=tf.keras.losses.Reduction.SUM)\n elif model_type == 'deep':\n return tf.compat.v1.estimator.DNNClassifier(\n model_dir=model_dir,\n feature_columns=deep_columns,\n hidden_units=CONF.model[\"dnn_hidden_units\"],\n optimizer=tf.compat.v1.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.1,\n l2_regularization_strength=0.1), # {'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'}\n activation_fn=activation_fn(CONF.model[\"dnn_activation_function\"]), # tf.nn.relu vs 'tf.nn.relu'\n dropout=CONF.model[\"dnn_dropout\"],\n weight_column=weight_column,\n input_layer_partitioner=None,\n config=run_config, loss_reduction=tf.keras.losses.Reduction.SUM)\n else:\n return tf.compat.v1.estimator.DNNLinearCombinedClassifier(\n model_dir=model_dir, # self._model_dir = model_dir or self._config.model_dir\n linear_feature_columns=wide_columns,\n linear_optimizer=tf.compat.v1.train.FtrlOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.5,\n l2_regularization_strength=1),\n dnn_feature_columns=deep_columns,\n dnn_optimizer=tf.compat.v1.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.1,\n l2_regularization_strength=0.1),\n dnn_hidden_units=CONF.model[\"dnn_hidden_units\"],\n dnn_activation_fn=activation_fn(CONF.model[\"dnn_activation_function\"]),\n dnn_dropout=CONF.model[\"dnn_dropout\"],\n n_classes=2,\n weight_column=weight_column,\n label_vocabulary=None,\n input_layer_partitioner=None,\n config=run_config, loss_reduction=tf.keras.losses.Reduction.SUM)\n\n\ndef build_custom_estimator(model_dir, model_type):\n \"\"\"Build an estimator using custom WideAndDeepClassifier API.\n Args:\n model_dir: model save base directory\n model_type: one of {`wide`, `deep`, `wide_deep`}\n Returns:\n 
model instance of lib.joint.WideAndDeepClassifier class\n \"\"\"\n wide_columns, deep_columns = _build_model_columns()\n _build_distribution()\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig(**CONF.runconfig).replace(\n session_config=tf.compat.v1.ConfigProto(device_count={'GPU': 0}))\n\n return WideAndDeepClassifier(\n model_type=model_type,\n model_dir=model_dir,\n with_cnn=CONF.model[\"cnn_use_flag\"],\n cnn_optimizer=CONF.model[\"cnn_optimizer\"],\n linear_feature_columns=wide_columns,\n linear_optimizer=CONF.model[\"linear_optimizer\"],\n dnn_feature_columns=deep_columns,\n dnn_optimizer=CONF.model[\"dnn_optimizer\"],\n dnn_hidden_units=CONF.model[\"dnn_hidden_units\"],\n dnn_connected_mode=CONF.model[\"dnn_connected_mode\"],\n n_classes=2,\n weight_column=weight_column,\n label_vocabulary=None,\n input_layer_partitioner=None,\n config=run_config)\n\nif __name__ == '__main__':\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)\n # _build_model_columns()\n # _build_distribution()\n model = build_estimator('../model', 'wide')\n model = build_custom_estimator('../model', 'wide')\n # print(model.config) # <tensorflow.python.estimator.run_config.RunConfig object at 0x118de4e10>\n # print(model.model_dir) # ../model\n # print(model.model_fn) # <function public_model_fn at 0x118de7b18>\n # print(model.params) # {}\n # print(model.get_variable_names())\n # print(model.get_variable_value('dnn/hiddenlayer_0/bias'))\n # print(model.get_variable_value('dnn/hiddenlayer_0/bias/Adagrad'))\n # print(model.get_variable_value('dnn/hiddenlayer_0/kernel'))\n # print(model.latest_checkpoint()) # another 4 method is export_savedmodel,train evaluate predict\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: lapis-hong\n# @Date : 2018/3/5\n\"\"\"Provides custom function to preprocess images.\nTODO: custom preprocess for CTR task\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef preprocess_image(image, is_training, height, width, depth):\n \"\"\"Preprocess a single image of layout [height, width, depth].\"\"\"\n if is_training:\n # Resize the image to add four extra pixels on each side.\n image = tf.image.resize_with_crop_or_pad(\n image, height + 8, width + 8)\n # Randomly crop a [_HEIGHT, _WIDTH] section of the image.\n image = tf.image.random_crop(image, [height, width, depth])\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n # Subtract off the mean and divide by the variance of the pixels.\n image = tf.image.per_image_standardization(image)\n return image\n"
] | [
[
"tensorflow.train.Server",
"tensorflow.train.ClusterSpec",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.train_and_evaluate",
"tensorflow.app.run"
],
[
"numpy.log",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.train.ProximalAdagradOptimizer",
"tensorflow.compat.v1.train.FtrlOptimizer",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.math.log",
"tensorflow.estimator.RunConfig"
],
[
"tensorflow.image.random_crop",
"tensorflow.image.random_flip_left_right",
"tensorflow.image.per_image_standardization",
"tensorflow.image.resize_with_crop_or_pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
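The `embedding_dim` helper quoted inside `_build_model_columns` above sizes each embedding as `2 ** ceil(ln(dim ** 0.25))`. A standalone check of what that formula yields for common hash-bucket sizes — note that `np.log` is the natural logarithm, so the dimensions grow more slowly than a plain fourth-root rule of thumb would give:

    import numpy as np

    def embedding_dim(dim):
        # Verbatim heuristic from the quoted _build_model_columns.
        return int(np.power(2, np.ceil(np.log(dim ** 0.25))))

    for buckets in (100, 1_000, 10_000, 100_000, 1_000_000):
        # Prints 4, 4, 8, 8, 16 respectively.
        print(f"{buckets:>9} hash buckets -> embedding dim {embedding_dim(buckets)}")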
molokhovdmitry/placeholder | [
"cc0a983af91fcbea3dcd7b9a16db471b000b5ff5"
] | [
"model/create.py"
] | [
"\"\"\"\nMIT License\n\nCopyright (c) 2021 molokhovdmitry\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\"\"\"This file creates the model (model.h5) and class (classes.txt) files.\"\"\"\n\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nfrom config import (DOWNLOAD_PATH, MODEL_PATH, IMG_SIZE,\n EPOCHS, DROPOUT, VALIDATION_SPLIT,\n BATCH_SIZE, SHUFFLE_BUFFER, PREFETCH_BUFFER,\n VISUALIZE_RESULTS)\n\n\nDATA_PATH = Path.joinpath(Path(DOWNLOAD_PATH), \"frames\")\nMODEL_PATH = Path(MODEL_PATH)\nMODEL_FILE = Path.joinpath(MODEL_PATH, \"model.h5\")\nCLASS_FILE = Path.joinpath(MODEL_PATH, \"classes.txt\")\n\nIMG_HEIGHT = IMG_SIZE[\"height\"]\nIMG_WIDTH = IMG_SIZE[\"width\"]\n\n# Get all classes.\nCLASS_NAMES = [category.name for category in DATA_PATH.iterdir()]\nNUM_CLASSES = len(CLASS_NAMES)\n\n# Save classes in a txt file.\nCLASS_FILE.touch()\nclasses = \"\"\nfor name in CLASS_NAMES:\n classes += str(name) + '\\n'\nCLASS_FILE.write_text(classes)\n\n\n\"\"\"\nGPU support fix.\nhttps://github.com/tensorflow/tensorflow/issues/24828#issuecomment-464910864\n\"\"\"\nconfig = tf.compat.v1.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.compat.v1.Session(config=config)\n\n\ndef create():\n \"\"\"Creates a model.\"\"\"\n # Load the data.\n train_ds, val_ds = load_data(str(DATA_PATH))\n\n # Create and compile the model.\n model = get_model()\n model.summary()\n\n # Fit the model and save the history.\n history = model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS)\n\n # Save the model to a file.\n model.save(str(MODEL_FILE))\n print(\"Model saved.\")\n\n if VISUALIZE_RESULTS:\n # Make loss and accuracy plots with history data.\n make_plots(history, EPOCHS)\n\n\ndef load_data(data_dir):\n \"\"\"Loads the data. 
Returns tuple (`train_ds`, `val_ds`).\"\"\"\n # Training data.\n train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=VALIDATION_SPLIT,\n subset=\"training\",\n seed=123,\n image_size=(IMG_HEIGHT, IMG_WIDTH),\n batch_size=BATCH_SIZE\n )\n\n # Validation data.\n val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=VALIDATION_SPLIT,\n subset=\"validation\",\n seed=123,\n image_size=(IMG_HEIGHT, IMG_WIDTH),\n batch_size=BATCH_SIZE\n )\n\n # Configure the dataset for performance.\n train_ds = train_ds.shuffle(SHUFFLE_BUFFER).\\\n prefetch(buffer_size=PREFETCH_BUFFER)\n val_ds = val_ds.prefetch(buffer_size=PREFETCH_BUFFER)\n\n return train_ds, val_ds\n\n\ndef get_model():\n \"\"\"Creates and compiles neural network.\"\"\"\n model = Sequential([\n layers.experimental.preprocessing.\\\n Rescaling(1./255, input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),\n layers.Conv2D(128, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(256, activation='relu'),\n layers.Dropout(DROPOUT),\n layers.Dense(NUM_CLASSES),\n ])\n\n model.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n\n return model\n\n\ndef make_plots(history, epochs):\n \"\"\"Visualizes training results.\"\"\"\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n loss = history.history['loss']\n # Read the validation loss without rebinding `history` (the original\n # `val_loss = history = history.history['val_loss']` clobbered `history`).\n val_loss = history.history['val_loss']\n epochs_range = range(epochs)\n\n plt.figure(figsize=(8, 8))\n\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, acc, label=\"Training Accuracy\")\n plt.plot(epochs_range, val_acc, label=\"Validation Accuracy\")\n plt.legend(loc=\"lower right\")\n plt.title(\"Training and Validation Accuracy\")\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label=\"Training Loss\")\n plt.plot(epochs_range, val_loss, label=\"Validation Loss\")\n plt.legend(loc=\"upper right\")\n plt.title(\"Training and Validation Loss\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n create()\n"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.compat.v1.ConfigProto",
"matplotlib.pyplot.title",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.preprocessing.image_dataset_from_directory",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.compat.v1.Session",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
kalosisz/tensorflow | [
"b7ecd75b24f577b73500024fe91d2ea0c806d05a",
"b7ecd75b24f577b73500024fe91d2ea0c806d05a",
"b7ecd75b24f577b73500024fe91d2ea0c806d05a",
"b7ecd75b24f577b73500024fe91d2ea0c806d05a",
"b7ecd75b24f577b73500024fe91d2ea0c806d05a",
"b7ecd75b24f577b73500024fe91d2ea0c806d05a"
] | [
"tensorflow/python/framework/extension_type_test.py",
"tensorflow/lite/python/lite_v2_test_util.py",
"tensorflow/python/distribute/input_ops.py",
"tensorflow/python/saved_model/signature_serialization.py",
"tensorflow/python/util/function_utils_test.py",
"tensorflow/python/saved_model/save_context_test.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.framework.extension_type.\"\"\"\n\nimport contextlib\nimport tempfile\nimport typing\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import extension_type\nfrom tensorflow.python.framework import extension_type_field\nfrom tensorflow.python.framework import immutable_dict\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\n\n\nclass MaskedTensorV1(extension_type.ExtensionType):\n \"\"\"Example subclass of ExtensionType, used for testing.\"\"\"\n values: ops.Tensor\n mask: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n\n\nclass MaskedTensorV2(extension_type.ExtensionType):\n \"\"\"Example subclass of ExtensionType, used for testing.\n\n This version adds methods, classmethod, staticmethod, and properties, and\n customizes `__repr__` and `__validate__`. 
It also adds a `__name__` field,\n which enables serialization.\n \"\"\"\n __name__ = 'tf.test.MaskedTensorV2'\n\n values: ops.Tensor\n mask: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n\n def __repr__(self):\n if hasattr(self.values, 'numpy') and hasattr(self.mask, 'numpy'):\n return '<MaskedTensorV2 %s>' % _masked_array_repr(self.values.numpy(),\n self.mask.numpy())\n else:\n return super(MaskedTensorV2, self).__repr__()\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n @classmethod\n def from_full_tensor(cls, values):\n return cls(values, array_ops.ones_like(values, dtype=dtypes.bool))\n\n # A dummy example to test support of staticmethod\n @staticmethod\n def doc_link():\n return 'http://example.com/masked_tensor'\n\n def __validate__(self):\n self.values.shape.assert_is_compatible_with(self.mask.shape)\n\n def with_default(self, default):\n return array_ops.where_v2(self.mask, self.values, default)\n\n __add__ = math_ops.add\n __sub__ = math_ops.subtract\n\n\ndef _masked_array_repr(values, mask):\n \"\"\"Returns a string representation for a masked numpy array.\"\"\"\n assert len(values) == len(mask)\n if len(values.shape) == 1:\n items = [repr(v) if m else '_' for (v, m) in zip(values, mask)]\n else:\n items = [_masked_array_repr(v, m) for (v, m) in zip(values, mask)]\n return '[%s]' % ', '.join(items)\n\n\nclass ForwardRefA(extension_type.ExtensionType):\n x: typing.Tuple[typing.Union['ForwardRefA', 'ForwardRefB'], ...]\n y: 'ForwardRefB'\n\n\nclass ForwardRefB(extension_type.ExtensionType):\n z: 'ForwardRefB'\n n: ops.Tensor\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ExtensionTypeTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def testAttributeAccessors(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n mt2 = extension_type.pack(mt1)\n\n for mt in [mt1, mt2]:\n self.assertIsInstance(mt.values, ops.Tensor)\n self.assertAllEqual(mt.values, [1, 2, 3, 4])\n self.assertIsInstance(mt.mask, ops.Tensor)\n self.assertAllEqual(mt.mask, [True, True, False, True])\n\n def testAttributesAreImmutable(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n mt2 = extension_type.pack(mt1)\n\n for mt in [mt1, mt2]:\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `score` outside the custom constructor of ExtensionType'\n ):\n mt.score = 12\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `values` outside the custom constructor of ExtensionType'\n ):\n mt.values = constant_op.constant([4, 3, 2, 1])\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `values` outside the custom constructor of ExtensionType'\n ):\n del mt.values\n\n def testClassAndStaticMethod(self):\n mt = MaskedTensorV2.from_full_tensor([1, 2, 3, 4])\n self.assertAllEqual(mt.mask, [True, True, True, True])\n self.assertEqual(mt.doc_link(), 'http://example.com/masked_tensor')\n\n def testRepr(self):\n values = constant_op.constant([1, 2, 3, 4])\n mask = constant_op.constant([True, True, False, True])\n mt = MaskedTensorV1(values, mask)\n expected = f'MaskedTensorV1(values={values!r}, mask={mask!r})'\n self.assertEqual(expected, repr(mt))\n\n def testEagerRepr(self):\n values = constant_op.constant([1, 2, 3, 4])\n mask = constant_op.constant([True, True, False, True])\n mt = MaskedTensorV2(values, mask)\n if context.executing_eagerly():\n expected = '<MaskedTensorV2 [1, 2, _, 4]>'\n else:\n expected = 
f'MaskedTensorV2(values={values!r}, mask={mask!r})'\n\n self.assertEqual(expected, repr(mt))\n self.assertEqual(expected, repr(mt))\n\n def testConstructorSignature(self):\n\n class MyType(extension_type.ExtensionType):\n x: ops.Tensor\n y: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n z: typing.Tuple[typing.Union[int, str], ...] = [1, 'two', 3]\n\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter(\n 'x',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=ops.Tensor),\n tf_inspect.Parameter(\n 'y',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)),\n tf_inspect.Parameter(\n 'z',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=typing.Tuple[typing.Union[int, str], ...],\n default=(1, 'two', 3)),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=MyType)\n self.assertEqual(expected_sig, tf_inspect.signature(MyType.__init__))\n\n def testEmptyType(self):\n\n class EmptyType(extension_type.ExtensionType):\n pass\n\n self.assertEmpty(EmptyType._tf_extension_type_fields())\n x = EmptyType()\n self.assertEqual(repr(x), 'EmptyType()')\n\n def testCustomConstrutor(self):\n\n class SummarizedTensor(extension_type.ExtensionType):\n values: ops.Tensor\n mean: ops.Tensor\n max: ops.Tensor\n\n def __init__(self, values):\n self.values = ops.convert_to_tensor(values)\n self.mean = math_ops.reduce_mean(values)\n self.max = math_ops.reduce_max(values)\n\n x = SummarizedTensor([[1.0, 2, 3], [4, 5, 6]])\n self.assertAllEqual(x.values, [[1.0, 2, 3], [4, 5, 6]])\n self.assertAllEqual(x.mean, 3.5)\n self.assertAllEqual(x.max, 6)\n\n class Node(extension_type.ExtensionType):\n x: ops.Tensor\n y: typing.Optional[str] = None\n children: typing.Tuple['ExtensionTypeTest.Node', ...] 
= ()\n\n def testCustomConstructorWithDefaultValues(self):\n a = ExtensionTypeTest.Node(5)\n self.assertAllEqual(a.x, 5)\n self.assertIsNone(a.y)\n self.assertEqual(a.children, ())\n\n b = ExtensionTypeTest.Node(6, 'blue')\n self.assertAllEqual(b.x, 6)\n self.assertEqual(b.y, 'blue')\n self.assertEqual(b.children, ())\n\n c = ExtensionTypeTest.Node(7, children=(a, b))\n self.assertAllEqual(c.x, 7)\n self.assertIsNone(c.y)\n self.assertEqual(c.children, (a, b))\n\n def testCustomConstructorNondefaultCanotFollowDefault(self):\n with self.assertRaisesRegex(\n ValueError, \"Field without default 'd' follows field with default 'c'\"):\n\n class MyType(extension_type.ExtensionType):\n a: int\n b: str = 'Hello world'\n c: typing.Optional[ops.Tensor] = None\n d: ops.Tensor\n\n del MyType\n\n def testCustomConstrutorCantMutateNestedValues(self):\n\n class Foo(extension_type.ExtensionType):\n x: int\n\n class Bar(extension_type.ExtensionType):\n foo: Foo\n\n def __init__(self, foo):\n foo.x = 33 # This raises an exception\n\n with self.assertRaisesRegex(\n AttributeError,\n 'Cannot mutate attribute `x` outside the custom constructor of ExtensionType'\n ):\n Bar(Foo(12))\n\n def testCustomValidate(self):\n\n class AlignedTensors(extension_type.ExtensionType):\n x: ops.Tensor\n y: ops.Tensor\n\n def __validate__(self):\n self.x.shape.assert_is_compatible_with(self.y.shape)\n\n aligned = AlignedTensors([1, 2, 3], ['a', 'b', 'c'])\n self.assertAllEqual(aligned.x, [1, 2, 3])\n self.assertAllEqual(aligned.y, [b'a', b'b', b'c'])\n\n with self.assertRaises(ValueError):\n AlignedTensors([1, 2, 3], ['a', 'b', 'c', 'd'])\n\n def testEquals(self):\n\n class MyType(extension_type.ExtensionType):\n values: ops.Tensor\n score: ops.Tensor\n flavor: str\n\n x1 = MyType([1, 2], 8, 'blue')\n x2 = MyType([1, 2], 8, 'blue')\n y = MyType([1, 2], 8, 'red')\n z = MyType([1, 2], 7, 'blue')\n self.assertAllEqual(x1 == x2, True)\n self.assertAllEqual(x1 != x2, False)\n self.assertAllEqual(x1 == y, False)\n self.assertAllEqual(x1 != y, True)\n self.assertAllEqual(x1 == z, False)\n self.assertAllEqual(y == z, False)\n\n # These are not equal, even though their values are broadcast-compatible\n # and elements are all equal when we broadcast. 
Shapes must match.\n a = MyType([1, 1, 1, 1], 0, 'x')\n b = MyType([[1, 1, 1, 1]], 0, 'x')\n c = MyType([[1, 1], [1, 1]], 0, 'x')\n self.assertAllEqual(a == b, False)\n self.assertAllEqual(a == c, False)\n self.assertAllEqual(b == c, False)\n\n # Test with unknown shapes (executes a different codepath).\n a_ph = replace_tensors_with_placeholders(a)\n b_ph = replace_tensors_with_placeholders(b)\n c_ph = replace_tensors_with_placeholders(c)\n self.assertAllEqual(a_ph == b_ph, False)\n self.assertAllEqual(a_ph == c_ph, False)\n self.assertAllEqual(b_ph == c_ph, False)\n\n def testPassIntoTfFunction(self):\n\n @def_function.function\n def fn(x):\n return x.with_default(99)\n\n mt = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n self.assertAllEqual([1, 2, 99, 4], fn(mt))\n self.assertAllEqual([1, 2, 99, 4], fn(extension_type.pack(mt)))\n\n def testReturnFromTfFunction(self):\n\n @def_function.function\n def mask_neg_values(x):\n return MaskedTensorV2(x, x > 0)\n\n @def_function.function\n def mask_neg_values_packed(x):\n return extension_type.pack(MaskedTensorV2(x, x > 0))\n\n expected = MaskedTensorV2([5, 8, -3, 9], [True, True, False, True])\n\n actual1 = mask_neg_values(constant_op.constant([5, 8, -3, 9]))\n self.assertIsInstance(actual1, MaskedTensorV2)\n self.assertAllEqual(expected.values, actual1.values)\n self.assertAllEqual(expected.mask, actual1.mask)\n\n actual2 = mask_neg_values_packed(constant_op.constant([5, 8, -3, 9]))\n self.assertIsInstance(actual2, MaskedTensorV2)\n self.assertTrue(extension_type.is_packed(actual2))\n self.assertAllEqual(expected.values, actual2.values)\n self.assertAllEqual(expected.mask, actual2.mask)\n\n def testCaptureByTfFunction(self):\n x = MaskedTensorV2(\n values=[[1, 2, 3], [4, 5, 6]],\n mask=[[True, True, True], [True, False, True]])\n\n @def_function.function\n def add_to_x(y):\n return MaskedTensorV2(x.values + y.values, x.mask & y.mask)\n\n actual = add_to_x(MaskedTensorV2([10, 20, 30], [False, True, True]))\n expected = MaskedTensorV2(\n values=[[11, 22, 33], [14, 25, 36]],\n mask=[[False, True, True], [False, False, True]])\n self.assertIsInstance(actual, MaskedTensorV2)\n self.assertAllEqual(expected.values, actual.values)\n self.assertAllEqual(expected.mask, actual.mask)\n\n def testTfFunctionArgMutationError(self):\n\n @def_function.function\n def fn_with_side_effect(mts):\n mts.append(MaskedTensorV1(mts[0].values * 2, mts[0].mask))\n\n with self.assertRaisesRegex(ValueError, 'should not modify'):\n fn_with_side_effect([MaskedTensorV1([10, 20, 30], [False, True, True])])\n\n def testNestPackUnpack(self):\n\n class CandyStore(extension_type.ExtensionType):\n name: ops.Tensor\n prices: typing.Mapping[str, ops.Tensor]\n\n store = CandyStore('Yum', {'gum': [0.42, 0.48], 'chocolate': [0.83, 1.02]})\n components = nest.flatten(store, expand_composites=True)\n repacked_1 = nest.pack_sequence_as(\n store, components, expand_composites=True)\n repacked_2 = nest.pack_sequence_as(\n store._type_spec, components, expand_composites=True)\n\n # Note: dicts get sorted by key.\n self.assertLen(components, 3)\n self.assertAllEqual(components[0], b'Yum')\n self.assertAllClose(components[1], [0.83, 1.02])\n self.assertAllClose(components[2], [0.42, 0.48])\n\n for repacked in [repacked_1, repacked_2]:\n self.assertAllEqual(repacked.name, b'Yum')\n self.assertAllClose(repacked.prices['gum'], [0.42, 0.48])\n self.assertAllClose(repacked.prices['chocolate'], [0.83, 1.02])\n\n def testSimpleCond(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, 
True, False])\n y = MaskedTensorV1([5, 6, 7, 8], [False, True, True, False])\n\n x_2 = control_flow_ops.cond(\n constant_op.constant(True), lambda: x, lambda: y)\n y_2 = control_flow_ops.cond(\n constant_op.constant(False), lambda: x, lambda: y)\n\n self.assertAllEqual(x.values, x_2.values)\n self.assertAllEqual(x.mask, x_2.mask)\n self.assertAllEqual(y.values, y_2.values)\n self.assertAllEqual(y.mask, y_2.mask)\n\n def testComplexCond(self):\n mt = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n def true_fn():\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, mt.values, -1), mt.values > 3)\n\n def false_fn():\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, 100, mt.values * 2),\n math_ops.logical_not(mt.mask))\n\n x = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)\n y = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)\n\n self.assertAllEqual(x.values, [1, -1, 3, -1])\n self.assertAllEqual(x.mask, [False, False, False, True])\n self.assertAllEqual(y.values, [100, 4, 100, 8])\n self.assertAllEqual(y.mask, [False, True, False, True])\n\n def testCondAutograph(self):\n\n @def_function.function\n def fn(mt):\n if mt.values[3] > 3:\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, mt.values, -1), mt.values > 3)\n else:\n return MaskedTensorV1(\n array_ops.where_v2(mt.mask, 100, mt.values * 2), not mt.mask)\n\n x = fn(MaskedTensorV1([1, 2, 3, 4], [True, False, True, False]))\n self.assertAllEqual(x.values, [1, -1, 3, -1])\n self.assertAllEqual(x.mask, [False, False, False, True])\n\n def testCondTypeMismatch(self):\n # Note: `executing_eagerly` must be called; the bare function reference\n # is always truthy, which made this test return unconditionally.\n if context.executing_eagerly():\n # In eager mode, tf.cond eagerly runs either true_fn or false_fn, and\n # ignores the other one; so it doesn't detect any type mismatches\n # between the two outcomes. 
(See _eager_cond_implementation in\n # control_flow_ops.py.)\n return\n\n a = lambda: MaskedTensorV1([1, 2, 3], [True, True, False])\n b = lambda: MaskedTensorV1(['a', 'b', 'c'], [False, True, True])\n c = lambda: MaskedTensorV2([4, 5, 6], [True, True, False])\n d = lambda: constant_op.constant([7, 8, 9])\n\n with self.assertRaisesRegex(\n ValueError,\n 'Incompatible return values of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, b)\n with self.assertRaisesRegex(\n TypeError, 'Incompatible return types of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, c)\n with self.assertRaisesRegex(\n ValueError,\n 'Incompatible return values of true_fn and false_fn: The two '\n \"structures don't have the same nested structure\"):\n control_flow_ops.cond(constant_op.constant(True), a, d)\n\n def testCondPacked(self):\n x = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n y = MaskedTensorV2([5, 6, 7, 8], [False, True, True, False])\n x = extension_type.pack(x)\n y = extension_type.pack(y)\n\n x_2 = control_flow_ops.cond(\n constant_op.constant(True), lambda: x, lambda: y)\n y_2 = control_flow_ops.cond(\n constant_op.constant(False), lambda: x, lambda: y)\n\n self.assertAllEqual(x.values, x_2.values)\n self.assertAllEqual(x.mask, x_2.mask)\n self.assertAllEqual(y.values, y_2.values)\n self.assertAllEqual(y.mask, y_2.mask)\n\n a = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n b = extension_type.pack(a)\n b = control_flow_ops.cond(\n constant_op.constant(True), lambda: array_ops.size(a.mask),\n lambda: array_ops.size(a.values))\n self.assertAllEqual(b, 4)\n\n # Note: the following example would fail (with `Retval[0] does not have a\n # value`) if `ExtensionType.__getattr__` cached the results of unpacking\n # the value. 
See the comment in `ExtensionType.__getattr__` for details.\n c = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n c = extension_type.pack(c)\n d = control_flow_ops.cond(\n constant_op.constant(False), lambda: array_ops.size(c.mask),\n lambda: array_ops.size(c.values))\n self.assertAllEqual(d, 4)\n\n def testWhileLoop(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n cond = lambda i, x: i < 10\n body = lambda i, x: (i + 1, MaskedTensorV1(x.values * 2, x.mask))\n _, y = control_flow_ops.while_loop_v2(cond, body, [0, x])\n\n self.assertIsInstance(y, MaskedTensorV1)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testWhileLoopAutograph(self):\n\n @def_function.function\n def fn(x, n):\n for _ in math_ops.range(n):\n x = MaskedTensorV1(x.values * 2, x.mask)\n return x\n\n y = fn(MaskedTensorV1([1, 2, 3, 4], [True, False, True, False]), 10)\n self.assertIsInstance(y, MaskedTensorV1)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testWhileLoopTypeMismatch(self):\n x = MaskedTensorV1([1, 2, 3, 4], [True, False, True, False])\n\n cond = lambda i, x: i < 10\n\n def body(i, x):\n if isinstance(x, MaskedTensorV1):\n return x.values * 2\n else:\n return MaskedTensorV1(x, x > i)\n\n with self.assertRaisesRegex(\n ValueError, \"The two structures don't have the same nested structure\"):\n control_flow_ops.while_loop_v2(cond, body, [0, x])\n\n def testWhileLoopPacked(self):\n x = MaskedTensorV2([1, 2, 3, 4], [True, False, True, False])\n x = extension_type.pack(x)\n cond = lambda i, x: i < 10\n\n def body(i, x):\n return i + 1, extension_type.pack(MaskedTensorV2(x.values * 2, x.mask))\n\n _, y = control_flow_ops.while_loop_v2(cond, body, [0, x])\n self.assertIsInstance(y, MaskedTensorV2)\n self.assertAllEqual(y.values, [1024, 2048, 3072, 4096])\n self.assertAllEqual(y.mask, [True, False, True, False])\n\n def testNestedFields(self):\n PossiblyRaggedTensor = typing.Union[ops.Tensor, ragged_tensor.RaggedTensor]\n ToyFeatures = typing.Mapping[str, PossiblyRaggedTensor]\n\n class ToyInfo(extension_type.ExtensionType):\n version: str\n toys: typing.Tuple[typing.Tuple[str, ops.Tensor, ToyFeatures], ...]\n boxes: typing.Mapping[str, ops.Tensor]\n\n authors = [[b'A', b'Aardvark'], [b'Z', b'Zhook']]\n toys = [('car', 1.0, {\n 'size': [8, 3, 2],\n 'color': [0.3, 0.2, 0.8]\n }), ('book', 3.7, {\n 'authors': ragged_factory_ops.constant(authors)\n })]\n boxes = {'green': ['car'], 'blue': ['car', 'book', 'book']}\n toy_info = ToyInfo(version='1.0 alpha', toys=toys, boxes=boxes)\n\n self.assertEqual(toy_info.version, '1.0 alpha')\n self.assertEqual(toy_info.toys[0][0], 'car')\n self.assertIsInstance(toy_info.toys[0][1], ops.Tensor)\n self.assertAllEqual(toy_info.toys[0][1], 1.0)\n self.assertEqual(set(toy_info.toys[0][2].keys()), {'size', 'color'})\n self.assertIsInstance(toy_info.toys[0][2]['size'], ops.Tensor)\n self.assertAllEqual(toy_info.toys[0][2]['size'], [8, 3, 2])\n self.assertIsInstance(toy_info.toys[1][2]['authors'],\n ragged_tensor.RaggedTensor)\n self.assertAllEqual(toy_info.toys[1][2]['authors'], authors)\n self.assertAllEqual(toy_info.boxes['green'], [b'car'])\n self.assertAllEqual(toy_info.boxes['blue'], ['car', 'book', 'book'])\n\n expected_repr = (\n r\"ToyInfo\\(version='1.0 alpha', toys=\\(\"\n r\"\\('car', <tf.Tensor[^>]*>, ImmutableDict\\(\"\n r\"{'size': <tf.Tensor[^>]*>, 'color': <tf.Tensor[^>]*>}\\)\\), \"\n 
r\"\\('book', <tf.Tensor[^>]*>, ImmutableDict\\(\"\n r\"{'authors': (<tf.RaggedTensor[^>]*>|tf.RaggedTensor\\(.*\\))}\\)\\)\\), \"\n r'boxes=ImmutableDict\\('\n r\"{'green': <tf.Tensor[^>]*>, 'blue': <tf.Tensor[^>]*>}\\)\\)\")\n\n self.assertRegex(repr(toy_info), expected_repr)\n\n def testNestedExtensionTypes(self):\n PossiblyMaskedTensor = typing.Union[ops.Tensor, MaskedTensorV1]\n\n class Toy(extension_type.ExtensionType):\n name: str\n price: ops.Tensor\n features: typing.Mapping[str, PossiblyMaskedTensor]\n\n class Box(extension_type.ExtensionType):\n contents: ops.Tensor\n\n class ToyInfo(extension_type.ExtensionType):\n version: str\n toys: typing.Tuple[Toy, ...]\n boxes: typing.Mapping[str, Box]\n\n authors = MaskedTensorV1(\n values=[[b'A', b'Quincy', b'Aardvark'], [b'Z', b'Zhook', b'']],\n mask=[[True, True, True], [True, True, False]])\n toys = [\n Toy('car', 1.0, {\n 'size': [8, 3, 2],\n 'color': [0.3, 0.2, 0.8]\n }),\n Toy(name='book', price=3.7, features={'authors': authors})\n ]\n boxes = {\n 'green': Box(['car']),\n 'blue': Box(contents=['car', 'book', 'book'])\n }\n toy_info = ToyInfo(version='1.0 alpha', toys=toys, boxes=boxes)\n\n @def_function.function\n def fn(info):\n prices = [toy.price for toy in info.toys]\n return math_ops.reduce_sum(array_ops.stack(prices))\n\n self.assertAllClose(fn(toy_info), 4.7)\n\n def testNestedCustomConstructor(self):\n\n class Toy(extension_type.ExtensionType):\n name: str\n price: ops.Tensor\n\n def __init__(self, name, price, discount=0):\n if discount:\n name += ' (discounted)'\n price *= (1 - discount)\n self.name = name\n self.price = price\n\n class ToyBox(extension_type.ExtensionType):\n toys: typing.Tuple[Toy, ...]\n\n def __init__(self, name_to_price, name_to_discount):\n self.toys = [\n Toy(name, price, name_to_discount.get(name, 0))\n for (name, price) in name_to_price.items()\n ]\n\n toy_box = ToyBox({\n 'car': 8.3,\n 'truck': 5.9,\n 'puzzle': 5.3,\n 'jacks': 2.8\n }, {\n 'puzzle': .2,\n 'truck': .3\n })\n self.assertLen(toy_box.toys, 4)\n self.assertEqual(\n set(toy.name for toy in toy_box.toys),\n {'car', 'truck (discounted)', 'puzzle (discounted)', 'jacks'})\n\n def testExtensionTypeWithMathOperators(self):\n\n def masked_add(x, y, name=None):\n del name\n if not isinstance(x, MaskedTensorV2) and isinstance(y, MaskedTensorV2):\n return dispatch.OpDispatcher.NOT_SUPPORTED\n return MaskedTensorV2(x.values + y.values, x.mask & y.mask)\n\n with temporarily_add_dispatch(math_ops.add, MaskedTensorV2, masked_add):\n x = MaskedTensorV2([[1, 2], [3, 4]], [[True, False], [True, True]])\n y = MaskedTensorV2([[3, 4], [5, 6]], [[True, True], [False, True]])\n z = x + y\n self.assertAllEqual(z.values, [[4, 6], [8, 10]])\n self.assertAllEqual(z.mask, [[True, False], [False, True]])\n\n def testGetExtensionTypeFields(self):\n\n # Can be called on a type or an instance:\n fields_1 = MaskedTensorV1._tf_extension_type_fields()\n fields_2 = MaskedTensorV1([0], [True])._tf_extension_type_fields()\n\n for fields in [fields_1, fields_2]:\n self.assertLen(fields, 2)\n self.assertEqual(fields[0].name, 'values')\n self.assertEqual(fields[0].value_type, ops.Tensor)\n self.assertEqual(fields[0].default, fields[0].NO_DEFAULT)\n self.assertEqual(fields[1].name, 'mask')\n self.assertEqual(fields[1].value_type,\n tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool))\n self.assertEqual(fields[1].default, fields[0].NO_DEFAULT)\n\n def testHasExtensionTypeField(self):\n\n self.assertTrue(MaskedTensorV1._tf_extension_type_has_field('values'))\n 
self.assertTrue(MaskedTensorV1._tf_extension_type_has_field('mask'))\n self.assertFalse(MaskedTensorV1._tf_extension_type_has_field('labels'))\n\n mt = MaskedTensorV1([0], [True])\n self.assertTrue(mt._tf_extension_type_has_field('values'))\n self.assertTrue(mt._tf_extension_type_has_field('mask'))\n self.assertFalse(mt._tf_extension_type_has_field('labels'))\n\n def testForwardReferences(self):\n A, B = ForwardRefA, ForwardRefB\n\n self.assertEqual(A._tf_extension_type_fields(),\n (extension_type_field.ExtensionTypeField(\n 'x', typing.Tuple[typing.Union[A, B], ...]),\n extension_type_field.ExtensionTypeField('y', B)))\n self.assertEqual(B._tf_extension_type_fields(),\n (extension_type_field.ExtensionTypeField('z', B),\n extension_type_field.ExtensionTypeField('n', ops.Tensor)))\n\n # Check the signature.\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter(\n 'x',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=typing.Tuple[typing.Union['ForwardRefA', 'ForwardRefB'],\n ...]),\n tf_inspect.Parameter(\n 'y',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation='ForwardRefB'),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=A)\n self.assertEqual(tf_inspect.signature(A.__init__), expected_sig)\n\n def testUnresolvedForwardReference(self):\n\n class Broken(extension_type.ExtensionType):\n x: 'Cra' # note: intentional typo for Car.\n\n class Car(extension_type.ExtensionType):\n speed: float\n\n with self.assertRaises(TypeError):\n Broken(x=Car(3.8))\n\n def testUnsupportedAnnotations(self):\n with self.assertRaisesRegex(\n TypeError, \"In field 'values': Unsupported type annotation\"):\n\n class MyType1(extension_type.ExtensionType): # pylint: disable=unused-variable\n values: typing.List[ops.Tensor]\n\n with self.assertRaisesRegex(TypeError,\n \"In field 'xyz': Unsupported type annotation\"):\n\n class MyType2(extension_type.ExtensionType): # pylint: disable=unused-variable\n xyz: typing.Union[typing.Tuple[complex, ...], int]\n\n def testExtensionTypeBaseClassHasNoSpec(self):\n self.assertFalse(hasattr(extension_type.ExtensionType, 'Spec'))\n\n def testExtensionTypeBaseConstructorRaisesException(self):\n with self.assertRaisesRegex(AssertionError,\n 'ExtensionType is an abstract base class.'):\n extension_type.ExtensionType()\n\n class ExtensionTypeWithName(extension_type.ExtensionType):\n __name__ = 'tf.__test__.ExtensionTypeWithName' # For SavedModel\n x: typing.Tuple[ops.Tensor, int]\n y: ops.Tensor\n\n def testSavedModelSupport(self):\n\n class TestModule(module.Module):\n\n @def_function.function\n def f(self, s):\n return s.x[0] + s.x[1] + s.y\n\n s1 = self.ExtensionTypeWithName((1, 2), 3)\n s2 = self.ExtensionTypeWithName((1.0, 2), [3.0, 4.0])\n\n m = TestModule()\n m.f.get_concrete_function(s1)\n m.f.get_concrete_function(s2)\n\n path = tempfile.mkdtemp(prefix=test.get_temp_dir())\n save.save(m, path)\n loaded = load.load(path)\n\n self.assertAllEqual(loaded.f(s1), 6)\n self.assertAllEqual(loaded.f(s2), [6.0, 7.0])\n\n def testPackedEncoding(self):\n mt1 = MaskedTensorV2([1, 2, 3, 4], [True, True, False, True])\n self.assertLen(nest.flatten(mt1, expand_composites=True), 2)\n\n mt2 = extension_type.pack(mt1)\n self.assertLen(nest.flatten(mt2, expand_composites=True), 1)\n self.assertIsInstance(mt2.values, ops.Tensor)\n self.assertAllEqual(mt2.values, [1, 2, 3, 4])\n self.assertIsInstance(mt2.mask, ops.Tensor)\n self.assertAllEqual(mt2.mask, [True, True, False, 
True])\n\n mt3 = extension_type.unpack(mt2)\n self.assertLen(nest.flatten(mt3, expand_composites=True), 2)\n self.assertIsInstance(mt3.values, ops.Tensor)\n self.assertAllEqual(mt3.values, [1, 2, 3, 4])\n self.assertIsInstance(mt3.mask, ops.Tensor)\n self.assertAllEqual(mt3.mask, [True, True, False, True])\n\n nest.assert_same_structure(mt1, mt3, expand_composites=True)\n with self.assertRaisesRegex(ValueError, \"don't have the same\"): # pylint: disable=g-error-prone-assert-raises\n nest.assert_same_structure(mt1, mt2, expand_composites=True)\n\n mt4 = MaskedTensorV1([1, 2, 3, 4], [True, True, False, True])\n with self.assertRaisesRegex(\n ValueError,\n 'ExtensionTypes must have a __name__ field in order to be packed.'):\n extension_type.pack(mt4)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ExtensionTypeSpecTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def testSpecConstructor(self):\n values_spec = tensor_spec.TensorSpec([4], dtypes.float32)\n mask_spec = tensor_spec.TensorSpec([4], dtypes.bool)\n mt_spec = MaskedTensorV1.Spec(values_spec, mask_spec)\n self.assertEqual(mt_spec.values, values_spec)\n self.assertEqual(mt_spec.mask, mask_spec)\n\n mt = MaskedTensorV1([1.0, 2.0, 3.0, 4.0], [True, True, False, True])\n self.assertEqual(mt._type_spec, mt_spec)\n\n def testSpecConstructorSignature(self):\n\n class MyType(extension_type.ExtensionType):\n x: ops.Tensor\n y: tensor_spec.TensorSpec(shape=None, dtype=dtypes.bool)\n z: typing.Tuple[typing.Union[int, str], ...] = [1, 'two', 3]\n\n expected_parameters = [\n tf_inspect.Parameter('self',\n tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('x', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('y', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n tf_inspect.Parameter('z', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD),\n ]\n expected_sig = tf_inspect.Signature(\n expected_parameters, return_annotation=MyType.Spec)\n self.assertEqual(expected_sig, tf_inspect.signature(MyType.Spec.__init__))\n\n def testSpecAttributesAreImmutable(self):\n mt = MaskedTensorV1([1, 2, 3, 4], [True, True, False, True])\n mt_spec = MaskedTensorV1.Spec.from_value(mt)\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `score` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n mt_spec.score = 12\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `values` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n mt_spec.values = constant_op.constant([4, 3, 2, 1])\n with self.assertRaisesRegex(\n AttributeError, 'Cannot mutate attribute `values` '\n 'outside the custom constructor of ExtensionTypeSpec'):\n del mt_spec.values\n\n def testSpecFromValue(self):\n mt = MaskedTensorV1([1.0, 2.0, 3.0, 4.0], [True, True, False, True])\n mt_spec = MaskedTensorV1.Spec.from_value(mt)\n\n expected_values_spec = tensor_spec.TensorSpec([4], dtypes.float32)\n expected_mask_spec = tensor_spec.TensorSpec([4], dtypes.bool)\n self.assertEqual(mt_spec.values, expected_values_spec)\n self.assertEqual(mt_spec.mask, expected_mask_spec)\n\n def testSpecSerialize(self):\n\n class Zoo(extension_type.ExtensionType):\n zookeepers: typing.Tuple[str, ...]\n animals: typing.Mapping[str, typing.Mapping[str, ops.Tensor]]\n\n featurespec = {\n 'size': tensor_spec.TensorSpec([3]),\n 'weight': tensor_spec.TensorSpec([])\n }\n zoo_spec = Zoo.Spec(\n zookeepers=['Zoey', 'Zack'],\n animals={\n 'tiger': featurespec,\n 'elephant': featurespec\n })\n\n serialized = 
zoo_spec._serialize()\n self.assertEqual(serialized,\n (('zookeepers', ('Zoey', 'Zack')), ('animals', {\n 'tiger': featurespec,\n 'elephant': featurespec\n })))\n restored = Zoo.Spec._deserialize(serialized)\n self.assertEqual(zoo_spec, restored)\n\n # ImmutableDict is used for the field, but dict for the serialization:\n self.assertIsInstance(zoo_spec.animals, immutable_dict.ImmutableDict)\n serialized_field_name, serialized_field_value = serialized[1]\n self.assertEqual(serialized_field_name, 'animals')\n self.assertIsInstance(serialized_field_value, dict)\n\n def testSpecComponents(self):\n\n class Zoo(extension_type.ExtensionType):\n zookeepers: typing.Tuple[str, ...]\n animals: typing.Mapping[str, typing.Mapping[str, ops.Tensor]]\n\n zoo = Zoo(\n ['Zoey', 'Zack'], {\n 'elephant': {\n 'size': [25, 30, 20],\n 'weight': 2000.0\n },\n 'tiger': {\n 'hunger': 3.2,\n 'size': [3, 8, 2],\n 'weight': 87.3\n }\n })\n zoo_spec = Zoo.Spec.from_value(zoo)\n\n components = zoo_spec._to_components(zoo)\n self.assertLen(components, 5)\n self.assertAllClose(components[0], [25, 30, 20])\n self.assertAllClose(components[1], 2000.0)\n self.assertAllClose(components[2], 3.2)\n self.assertAllClose(components[3], [3, 8, 2])\n self.assertAllClose(components[4], 87.3)\n\n restored = zoo_spec._from_components(components)\n self.assertAllEqual(zoo == restored, True)\n\n self.assertEqual(zoo_spec._component_specs,\n (tensor_spec.TensorSpec([3], dtypes.int32),\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([], dtypes.float32),\n tensor_spec.TensorSpec([3], dtypes.int32),\n tensor_spec.TensorSpec([], dtypes.float32)))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass AnonymousExtensionTypeTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n @parameterized.parameters([\n [dict(i=5, f=3.2, b=True, n=None)],\n [dict(x=(1, 2), y={\n 3: 4,\n 5: 6\n })],\n [lambda: dict(t=constant_op.constant(123))],\n [lambda: dict(r=ragged_factory_ops.constant([[1, 2], [3]]))],\n ])\n def testConstruction(self, fields):\n if callable(fields):\n fields = fields()\n extension_type.AnonymousExtensionType(**fields)\n\n @parameterized.parameters([\n [dict(x=[1, 2, 3]), 'unsupported `value` argument'],\n [dict(x=set([1, 2])), 'unsupported `value` argument'],\n [dict(x=(1, dict([(2, [])]))), 'unsupported `value` argument'],\n [\n dict(_tf_extension_type_xyz=5),\n 'Reserved field name .*_tf_extension_type_xyz.*'\n ],\n ])\n def testConstructionErrors(self, fields, error):\n with self.assertRaisesRegex(ValueError, error):\n extension_type.AnonymousExtensionType(**fields)\n\n @parameterized.parameters([\n [dict(i=5, f=3.2, b=True, n=None)],\n [dict(x=(1, 2), y={\n 3: 4,\n 5: 6\n })],\n [lambda: dict(t=constant_op.constant(123))],\n [lambda: dict(r=ragged_factory_ops.constant([[1, 2], [3]]))],\n ])\n def testAttributeAccessors(self, fields):\n if callable(fields):\n fields = fields()\n s = extension_type.AnonymousExtensionType(**fields)\n for (name, value) in fields.items():\n actual = getattr(s, name)\n if isinstance(actual, (ops.Tensor, ragged_tensor.RaggedTensor)):\n self.assertAllEqual(actual, value)\n else:\n self.assertEqual(actual, value)\n\n def testAttributeAccessorsAreImmutable(self):\n s = extension_type.AnonymousExtensionType(x=12, y={'x': 55})\n with self.assertRaisesRegex(AttributeError, 'Cannot set attribute `x`'):\n s.x = 22\n with self.assertRaisesRegex(AttributeError, 'Cannot delete attribute `y`'):\n del s.y\n with self.assertRaisesRegex(TypeError, 'does not support item 
assignment'):\n s.y['x'] = 66\n\n def testReinterpret(self):\n x = MaskedTensorV2([4, 5], [True, False])\n anon_x = extension_type.reinterpret(x,\n extension_type.AnonymousExtensionType)\n self.assertAllEqual(anon_x.values, [4, 5])\n self.assertAllEqual(anon_x.mask, [True, False])\n\n round_trip_x = extension_type.reinterpret(anon_x, MaskedTensorV2)\n self.assertAllEqual(round_trip_x.values, [4, 5])\n self.assertAllEqual(round_trip_x.mask, [True, False])\n\n converted_x = extension_type.reinterpret(anon_x, MaskedTensorV1)\n self.assertAllEqual(converted_x.values, [4, 5])\n self.assertAllEqual(converted_x.mask, [True, False])\n\n # pylint: disable=g-long-lambda\n @parameterized.parameters([\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3])), MaskedTensorV2,\n \"Missing required fields: {'mask'}\"\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=(1, 2, 3), mask=None), MaskedTensorV2,\n 'mask: expected a tf.bool Tensor, got None'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([[1, 2], [3, 4]]),\n mask=ragged_factory_ops.constant([[1, 2], [3]])), MaskedTensorV2,\n 'mask: expected a tf.bool Tensor'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3]),\n mask=constant_op.constant([True, False])), MaskedTensorV2,\n 'Shapes .* are incompatible'\n ],\n [\n lambda: extension_type.AnonymousExtensionType(\n values=constant_op.constant([1, 2, 3])), ops.Tensor,\n 'reinterpret expects `new_type` to be a subclass of '\n 'tf.ExtensionType; '\n 'got .*.Tensor.*'\n ],\n [\n lambda: constant_op.constant([1, 2, 3]),\n extension_type.AnonymousExtensionType,\n 'reinterpret expects `value` to be a tf.ExtensionType instance; '\n 'got.*.Tensor.*'\n ],\n ])\n def testReinterpretErrors(self, value, new_type, error):\n if callable(value):\n value = value()\n with self.assertRaisesRegex((TypeError, ValueError), error):\n extension_type.reinterpret(value, new_type)\n\n def testLoadSavedModelWithUnregisteredExtensionType(self):\n\n def f(x, y):\n x_values = x.values if isinstance(x, MaskedTensorV1) else x\n y_values = y.values if isinstance(y, MaskedTensorV1) else y\n x_mask = x.mask if isinstance(x, MaskedTensorV1) else True\n y_mask = y.mask if isinstance(y, MaskedTensorV1) else True\n return MaskedTensorV1(x_values + y_values, x_mask & y_mask)\n\n t_spec = tensor_spec.TensorSpec(None, dtypes.int32)\n b_spec = tensor_spec.TensorSpec(None, dtypes.bool)\n mt_spec = MaskedTensorV1.Spec(values=t_spec, mask=b_spec)\n model = module.Module()\n model.f = def_function.function(f)\n model.f.get_concrete_function(t_spec, t_spec)\n model.f.get_concrete_function(t_spec, mt_spec)\n model.f.get_concrete_function(mt_spec, t_spec)\n model.f.get_concrete_function(mt_spec, mt_spec)\n\n path = tempfile.mkdtemp(prefix=test.get_temp_dir())\n with temporarily_register_type_spec('tf.test.MaskedTensorV1.Spec',\n MaskedTensorV1.Spec):\n save.save(model, path)\n loaded_model = load.load(path)\n\n with self.assertRaises(ValueError):\n type_spec.lookup('tf.test.MaskedTensorV1')\n\n t = constant_op.constant([10, 20, 30])\n v1 = loaded_model.f(t, t)\n self.assertIsInstance(v1, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v1.values, [20, 40, 60])\n self.assertAllEqual(v1.mask, True)\n\n v2 = loaded_model.f(v1, v1)\n self.assertIsInstance(v2, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v2.values, [40, 80, 120])\n self.assertAllEqual(v2.mask, True)\n\n mt = 
MaskedTensorV1([1, 2, 3], [True, True, False])\n v3 = loaded_model.f(\n t, extension_type.reinterpret(mt,\n extension_type.AnonymousExtensionType))\n self.assertIsInstance(v3, extension_type.AnonymousExtensionType)\n self.assertAllEqual(v3.values, [11, 22, 33])\n self.assertAllEqual(v3.mask, [True, True, False])\n\n v4 = extension_type.reinterpret(v3, MaskedTensorV1)\n self.assertIsInstance(v4, MaskedTensorV1)\n self.assertAllEqual(v4.values, [11, 22, 33])\n self.assertAllEqual(v4.mask, [True, True, False])\n\n\ndef replace_tensors_with_placeholders(value):\n\n def repl(x):\n if isinstance(x, ops.Tensor):\n return array_ops.placeholder_with_default(x, shape=None)\n else:\n return x\n\n return nest.map_structure(repl, value, expand_composites=True)\n\n\[email protected]\ndef temporarily_add_dispatch(op, typ, fn):\n n = len(op._tf_dispatchers)\n dispatch.dispatch_for_types(op, typ)(fn)\n yield\n assert len(op._tf_dispatchers) == n + 1\n del op._tf_dispatchers[-1]\n\n\[email protected]\ndef temporarily_register_type_spec(name, cls):\n \"\"\"Context manager for making temporary changes to the TypeSpec registry.\"\"\"\n type_spec.register(name)(cls)\n yield\n assert type_spec._TYPE_SPEC_TO_NAME.pop(cls) == name\n assert type_spec._NAME_TO_TYPE_SPEC.pop(name) is cls\n\n\nif __name__ == '__main__':\n googletest.main()\n",
"# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py functionality related to TensorFlow 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nfrom six.moves import zip\n\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training.tracking import tracking\n\n\nclass ModelTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n \"\"\"Base test class for TensorFlow Lite 2.x model tests.\"\"\"\n\n def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):\n \"\"\"Evaluates the model on the `input_data`.\n\n Args:\n tflite_model: TensorFlow Lite model.\n input_data: List of EagerTensor const ops containing the input data for\n each input tensor.\n input_shapes: List of tuples representing the `shape_signature` and the\n new shape of each input tensor that has unknown dimensions.\n\n Returns:\n [np.ndarray]\n \"\"\"\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n if input_shapes:\n for idx, (shape_signature, final_shape) in enumerate(input_shapes):\n self.assertTrue(\n (input_details[idx]['shape_signature'] == shape_signature).all())\n index = input_details[idx]['index']\n interpreter.resize_tensor_input(index, final_shape, strict=True)\n interpreter.allocate_tensors()\n\n output_details = interpreter.get_output_details()\n input_details = interpreter.get_input_details()\n\n for input_tensor, tensor_data in zip(input_details, input_data):\n interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())\n interpreter.invoke()\n return [\n interpreter.get_tensor(details['index']) for details in output_details\n ]\n\n def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key,\n inputs):\n \"\"\"Evaluates the model on the `inputs`.\n\n Args:\n tflite_model: TensorFlow Lite model.\n signature_key: Signature key.\n inputs: Map from input tensor names in the SignatureDef to tensor value.\n\n Returns:\n Dictionary of outputs.\n Key is the output name in the SignatureDef 'signature_key'\n Value is the output value\n \"\"\"\n interpreter = Interpreter(model_content=tflite_model)\n signature_runner = interpreter.get_signature_runner(signature_key)\n return signature_runner(**inputs)\n\n def _getSimpleVariableModel(self):\n root = tracking.AutoTrackable()\n root.v1 = variables.Variable(3.)\n root.v2 = variables.Variable(2.)\n root.f = def_function.function(lambda x: 
root.v1 * root.v2 * x)\n return root\n\n def _getSimpleModelWithVariables(self):\n\n class SimpleModelWithOneVariable(tracking.AutoTrackable):\n \"\"\"Basic model with 1 variable.\"\"\"\n\n def __init__(self):\n super(SimpleModelWithOneVariable, self).__init__()\n self.var = variables.Variable(array_ops.zeros((1, 10), name='var'))\n\n @def_function.function\n def assign_add(self, x):\n self.var.assign_add(x)\n return self.var\n\n return SimpleModelWithOneVariable()\n\n def _getMultiFunctionModel(self):\n\n class BasicModel(tracking.AutoTrackable):\n \"\"\"Basic model with multiple functions.\"\"\"\n\n def __init__(self):\n self.y = None\n self.z = None\n\n @def_function.function\n def add(self, x):\n if self.y is None:\n self.y = variables.Variable(2.)\n return x + self.y\n\n @def_function.function\n def sub(self, x):\n if self.z is None:\n self.z = variables.Variable(3.)\n return x - self.z\n\n @def_function.function\n def mul_add(self, x, y):\n if self.z is None:\n self.z = variables.Variable(3.)\n return x * self.z + y\n\n return BasicModel()\n\n def _getMultiFunctionModelWithSharedWeight(self):\n\n class BasicModelWithSharedWeight(tracking.AutoTrackable):\n \"\"\"Model with multiple functions and a shared weight.\"\"\"\n\n def __init__(self):\n self.weight = constant_op.constant([1.0],\n shape=(1, 512, 512, 1),\n dtype=dtypes.float32)\n\n @def_function.function\n def add(self, x):\n return x + self.weight\n\n @def_function.function\n def sub(self, x):\n return x - self.weight\n\n @def_function.function\n def mul(self, x):\n return x * self.weight\n\n return BasicModelWithSharedWeight()\n\n def _assertValidDebugInfo(self, debug_info):\n \"\"\"Verify the DebugInfo is valid.\"\"\"\n file_names = set()\n for file_path in debug_info.files:\n file_names.add(os.path.basename(file_path))\n # To make the test independent on how the nodes are created, we only assert\n # the name of this test file.\n self.assertIn('lite_v2_test.py', file_names)\n self.assertNotIn('lite_test.py', file_names)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Input-pipeline utilities for Distribution strategies.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.experimental.ops import distribute\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops.options import AutoShardPolicy\nfrom tensorflow.python.data.util import traverse\nfrom tensorflow.python.framework import op_def_registry\nfrom tensorflow.python.framework import ops\n\n\n# pylint: disable=protected-access\ndef auto_shard_dataset(dataset, num_shards, index, num_replicas_in_sync=None):\n \"\"\"Shard the input pipeline by sharding the underlying list of files.\n\n Args:\n dataset: A `tf.data.Dataset` instance, typically the result of a bunch of\n dataset transformations.\n num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of\n shards operating in parallel. Same usage as in `tf.data.Dataset.shard`.\n index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.\n Same usage as in `tf.data.Dataset.shard`.\n num_replicas_in_sync: An integer representing the total number of replicas\n across all workers. This is used in the rewrite when sharding by data.\n\n Returns:\n A modified `Dataset` obtained by updating the pipeline sharded by the\n files. The input dataset will be returned if we cannot automatically\n determine a good way to shard the input dataset.\n \"\"\"\n if (dataset.options().experimental_distribute.auto_shard_policy !=\n AutoShardPolicy.OFF):\n if num_replicas_in_sync is None:\n num_replicas_in_sync = 1\n if isinstance(dataset, dataset_ops.DatasetV1):\n return distribute._AutoShardDatasetV1(dataset, num_shards, index,\n num_replicas_in_sync)\n else:\n return distribute._AutoShardDataset(dataset, num_shards, index,\n num_replicas_in_sync)\n else:\n return dataset\n\n\ndef _clone_dataset(dataset):\n \"\"\"Returns a cloned version of `dataset`.\"\"\"\n variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(dataset)\n remap_dict = _clone_helper(dataset._variant_tensor.op, variant_tensor_ops)\n new_variant_tensor = remap_dict[dataset._variant_tensor.op].outputs[0]\n return dataset_ops._VariantDataset(new_variant_tensor, dataset.element_spec)\n\n\ndef _get_op_def(op):\n return op.op_def or op_def_registry.get(op.type)\n\n\ndef _clone_helper(op_to_clone, variant_tensor_ops):\n \"\"\"Helper method that recursively clones `op_to_clone`.\n\n Args:\n op_to_clone: The op we want to clone.\n variant_tensor_ops: A list of ops that we have to clone along the way.\n\n Returns:\n A dictionary mapping old_ops to new_ops created. 
Includes op_to_clone\n as a key.\n \"\"\"\n remap_dict = {}\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in variant_tensor_ops:\n recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n remap_dict.update(recursive_map)\n inputs_list = []\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in remap_dict:\n remapped_input = remap_dict[input_tensor_op].outputs[0]\n inputs_list.append(remapped_input)\n else:\n inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n g = ops.get_default_graph()\n new_op = g.create_op(\n op_to_clone.type,\n inputs_list, [o.dtype for o in op_to_clone.outputs],\n name=op_to_clone.name,\n attrs=op_to_clone.node_def.attr,\n op_def=_get_op_def(op_to_clone))\n remap_dict[op_to_clone] = new_op\n return remap_dict\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helpers for working with signatures in tf.saved_model.save.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as defun\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.saved_model import function_serialization\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.compat import collections_abc\n\n\nDEFAULT_SIGNATURE_ATTR = \"_default_save_signature\"\nSIGNATURE_ATTRIBUTE_NAME = \"signatures\"\n# Max number of warnings to show if signature contains normalized input names.\n_NUM_DISPLAY_NORMALIZED_SIGNATURES = 5\n\n\ndef _get_signature(function):\n if (isinstance(function, (defun.Function, def_function.Function)) and\n function.input_signature is not None):\n function = function._get_concrete_function_garbage_collected() # pylint: disable=protected-access\n if not isinstance(function, defun.ConcreteFunction):\n return None\n return function\n\n\ndef _valid_signature(concrete_function):\n \"\"\"Returns whether concrete function can be converted to a signature.\"\"\"\n if not concrete_function.outputs:\n # Functions without outputs don't make sense as signatures. We just don't\n # have any way to run an Operation with no outputs as a SignatureDef in the\n # 1.x style.\n return False\n try:\n _validate_inputs(concrete_function)\n _normalize_outputs(concrete_function.structured_outputs, \"unused\", \"unused\")\n except ValueError:\n return False\n return True\n\n\ndef _validate_inputs(concrete_function):\n \"\"\"Raises error if input type is tf.Variable.\"\"\"\n if any(isinstance(inp, resource_variable_ops.VariableSpec)\n for inp in nest.flatten(\n concrete_function.structured_input_signature)):\n raise ValueError(\n f\"Unable to serialize concrete_function '{concrete_function.name}'\"\n f\"with tf.Variable input. 
Functions that expect tf.Variable \"\n \"inputs cannot be exported as signatures.\")\n\n\ndef _get_signature_name_changes(concrete_function):\n \"\"\"Checks for user-specified signature input names that are normalized.\"\"\"\n # Map of {user-given name: normalized name} if the names are un-identical.\n name_changes = {}\n for signature_input_name, graph_input in zip(\n concrete_function.function_def.signature.input_arg,\n concrete_function.graph.inputs):\n try:\n user_specified_name = compat.as_str(\n graph_input.op.get_attr(\"_user_specified_name\"))\n if signature_input_name.name != user_specified_name:\n name_changes[user_specified_name] = signature_input_name.name\n except ValueError:\n # Signature input does not have a user-specified name.\n pass\n return name_changes\n\n\ndef find_function_to_export(saveable_view):\n \"\"\"Function to export, None if no suitable function was found.\"\"\"\n # If the user did not specify signatures, check the root object for a function\n # that can be made into a signature.\n functions = saveable_view.list_functions(saveable_view.root)\n signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)\n if signature is not None:\n return signature\n\n # TODO(andresp): Discuss removing this behaviour. It can lead to WTFs when a\n # user decides to annotate more functions with tf.function and suddenly\n # serving that model way later in the process stops working.\n possible_signatures = []\n for function in functions.values():\n concrete = _get_signature(function)\n if concrete is not None and _valid_signature(concrete):\n possible_signatures.append(concrete)\n if len(possible_signatures) == 1:\n single_function = possible_signatures[0]\n signature = _get_signature(single_function)\n if signature and _valid_signature(signature):\n return signature\n return None\n\n\ndef canonicalize_signatures(signatures):\n \"\"\"Converts `signatures` into a dictionary of concrete functions.\"\"\"\n if signatures is None:\n return {}, {}\n if not isinstance(signatures, collections_abc.Mapping):\n signatures = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}\n num_normalized_signatures_counter = 0\n concrete_signatures = {}\n wrapped_functions = {}\n for signature_key, function in signatures.items():\n original_function = signature_function = _get_signature(function)\n if signature_function is None:\n raise ValueError(\n \"Expected a TensorFlow function for which to generate a signature, \"\n f\"but got {function}. Only `tf.functions` with an input signature or \"\n \"concrete functions can be used as a signature.\")\n\n wrapped_functions[original_function] = signature_function = (\n wrapped_functions.get(original_function) or\n function_serialization.wrap_cached_variables(original_function))\n _validate_inputs(signature_function)\n if num_normalized_signatures_counter < _NUM_DISPLAY_NORMALIZED_SIGNATURES:\n signature_name_changes = _get_signature_name_changes(signature_function)\n if signature_name_changes:\n num_normalized_signatures_counter += 1\n logging.warning(\n \"Function `%s` contains input name(s) %s with unsupported \"\n \"characters which will be renamed to %s in the SavedModel.\",\n compat.as_str(signature_function.graph.name),\n \", \".join(signature_name_changes.keys()),\n \", \".join(signature_name_changes.values()))\n # Re-wrap the function so that it returns a dictionary of Tensors. 
This\n # matches the format of 1.x-style signatures.\n # pylint: disable=cell-var-from-loop\n @def_function.function\n def signature_wrapper(**kwargs):\n structured_outputs = signature_function(**kwargs)\n return _normalize_outputs(\n structured_outputs, signature_function.name, signature_key)\n tensor_spec_signature = {}\n if signature_function.structured_input_signature is not None:\n # The structured input signature may contain other non-tensor arguments.\n inputs = filter(\n lambda x: isinstance(x, tensor_spec.TensorSpec),\n nest.flatten(signature_function.structured_input_signature,\n expand_composites=True))\n else:\n # Structured input signature isn't always defined for some functions.\n inputs = signature_function.inputs\n\n for keyword, inp in zip(\n signature_function._arg_keywords, # pylint: disable=protected-access\n inputs):\n keyword = compat.as_str(keyword)\n if isinstance(inp, tensor_spec.TensorSpec):\n spec = tensor_spec.TensorSpec(inp.shape, inp.dtype, name=keyword)\n else:\n spec = tensor_spec.TensorSpec.from_tensor(inp, name=keyword)\n tensor_spec_signature[keyword] = spec\n final_concrete = signature_wrapper._get_concrete_function_garbage_collected( # pylint: disable=protected-access\n **tensor_spec_signature)\n # pylint: disable=protected-access\n if len(final_concrete._arg_keywords) == 1:\n # If there is only one input to the signature, a very common case, then\n # ordering is unambiguous and we can let people pass a positional\n # argument. Since SignatureDefs are unordered (protobuf \"map\") multiple\n # arguments means we need to be keyword-only.\n final_concrete._num_positional_args = 1\n else:\n final_concrete._num_positional_args = 0\n # pylint: enable=protected-access\n concrete_signatures[signature_key] = final_concrete\n # pylint: enable=cell-var-from-loop\n return concrete_signatures, wrapped_functions\n\n\ndef _normalize_outputs(outputs, function_name, signature_key):\n \"\"\"Normalize outputs if necessary and check that they are tensors.\"\"\"\n # Convert `outputs` to a dictionary (if it's not one already).\n if not isinstance(outputs, collections_abc.Mapping):\n # Check if `outputs` is a namedtuple.\n if hasattr(outputs, \"_asdict\"):\n outputs = outputs._asdict()\n else:\n if not isinstance(outputs, collections_abc.Sequence):\n outputs = [outputs]\n outputs = {(\"output_{}\".format(output_index)): output\n for output_index, output in enumerate(outputs)}\n\n # Check that the keys of `outputs` are strings and the values are Tensors.\n for key, value in outputs.items():\n if not isinstance(key, compat.bytes_or_text_types):\n raise ValueError(\n f\"Got a dictionary with a non-string key {key!r} in the output of \"\n f\"the function {compat.as_str_any(function_name)} used to generate \"\n f\"the SavedModel signature {signature_key!r}.\")\n if not isinstance(value, (ops.Tensor, composite_tensor.CompositeTensor)):\n raise ValueError(\n f\"Got a non-Tensor value {value!r} for key {key!r} in the output of \"\n f\"the function {compat.as_str_any(function_name)} used to generate \"\n f\"the SavedModel signature {signature_key!r}. \"\n \"Outputs for functions used as signatures must be a single Tensor, \"\n \"a sequence of Tensors, or a dictionary from string to Tensor.\")\n return outputs\n\n\n# _SignatureMap is immutable to ensure that users do not expect changes to be\n# reflected in the SavedModel. Using public APIs, tf.saved_model.load() is the\n# only way to create a _SignatureMap and there is no way to modify it. 
So we can\n# safely ignore/overwrite \".signatures\" attributes attached to objects being\n# saved if they contain a _SignatureMap. A \".signatures\" attribute containing\n# any other type (e.g. a regular dict) will raise an exception asking the user\n# to first \"del obj.signatures\" if they want it overwritten.\nclass _SignatureMap(collections_abc.Mapping, base.Trackable):\n \"\"\"A collection of SavedModel signatures.\"\"\"\n\n def __init__(self):\n self._signatures = {}\n\n def _add_signature(self, name, concrete_function):\n \"\"\"Adds a signature to the _SignatureMap.\"\"\"\n # Ideally this object would be immutable, but restore is streaming so we do\n # need a private API for adding new signatures to an existing object.\n self._signatures[name] = concrete_function\n\n def __getitem__(self, key):\n return self._signatures[key]\n\n def __iter__(self):\n return iter(self._signatures)\n\n def __len__(self):\n return len(self._signatures)\n\n def __repr__(self):\n return \"_SignatureMap({})\".format(self._signatures)\n\n def _list_functions_for_serialization(self, unused_serialization_cache):\n return {\n key: value for key, value in self.items()\n if isinstance(value, (def_function.Function, defun.ConcreteFunction))\n }\n\n\nrevived_types.register_revived_type(\n \"signature_map\",\n lambda obj: isinstance(obj, _SignatureMap),\n versions=[revived_types.VersionedTypeRegistration(\n # Standard dependencies are enough to reconstruct the trackable\n # items in dictionaries, so we don't need to save any extra information.\n object_factory=lambda proto: _SignatureMap(),\n version=1,\n min_producer_version=1,\n min_consumer_version=1,\n setter=_SignatureMap._add_signature # pylint: disable=protected-access\n )])\n\n\ndef create_signature_map(signatures):\n \"\"\"Creates an object containing `signatures`.\"\"\"\n signature_map = _SignatureMap()\n for name, func in signatures.items():\n # This true of any signature that came from canonicalize_signatures. Here as\n # a sanity check on saving; crashing on load (e.g. in _add_signature) would\n # be more problematic in case future export changes violated these\n # assertions.\n assert isinstance(func, defun.ConcreteFunction)\n assert isinstance(func.structured_outputs, collections_abc.Mapping)\n # pylint: disable=protected-access\n if len(func._arg_keywords) == 1:\n assert 1 == func._num_positional_args\n else:\n assert 0 == func._num_positional_args\n signature_map._add_signature(name, func)\n # pylint: enable=protected-access\n return signature_map\n\n\ndef validate_saveable_view(saveable_view):\n \"\"\"Performs signature-related sanity checks on `saveable_view`.\"\"\"\n for name, dep in saveable_view.list_dependencies(\n saveable_view.root):\n if name == SIGNATURE_ATTRIBUTE_NAME:\n if not isinstance(dep, _SignatureMap):\n raise ValueError(\n f\"Exporting an object {saveable_view.root} which has an attribute \"\n f\"named '{SIGNATURE_ATTRIBUTE_NAME}'. This is a reserved attribute \"\n \"used to store SavedModel signatures in objects which come from \"\n \"`tf.saved_model.load`. Delete this attribute \"\n f\"(e.g. `del obj.{SIGNATURE_ATTRIBUTE_NAME}`) before saving if \"\n \"this shadowing is acceptable.\")\n break\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Estimator related util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import function_utils\n\n\ndef silly_example_function():\n pass\n\n\nclass SillyCallableClass(object):\n\n def __call__(self):\n pass\n\n\nclass FnArgsTest(test.TestCase):\n\n def test_simple_function(self):\n def fn(a, b):\n return a + b\n self.assertEqual(('a', 'b'), function_utils.fn_args(fn))\n\n def test_callable(self):\n\n class Foo(object):\n\n def __call__(self, a, b):\n return a + b\n\n self.assertEqual(('a', 'b'), function_utils.fn_args(Foo()))\n\n def test_bound_method(self):\n\n class Foo(object):\n\n def bar(self, a, b):\n return a + b\n\n self.assertEqual(('a', 'b'), function_utils.fn_args(Foo().bar))\n\n def test_bound_method_no_self(self):\n\n class Foo(object):\n\n def bar(*args): # pylint:disable=no-method-argument\n return args[1] + args[2]\n\n self.assertEqual((), function_utils.fn_args(Foo().bar))\n\n def test_partial_function(self):\n expected_test_arg = 123\n\n def fn(a, test_arg):\n if test_arg != expected_test_arg:\n return ValueError('partial fn does not work correctly')\n return a\n\n wrapped_fn = functools.partial(fn, test_arg=123)\n\n self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))\n\n def test_partial_function_with_positional_args(self):\n expected_test_arg = 123\n\n def fn(test_arg, a):\n if test_arg != expected_test_arg:\n return ValueError('partial fn does not work correctly')\n return a\n\n wrapped_fn = functools.partial(fn, 123)\n\n self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))\n\n self.assertEqual(3, wrapped_fn(3))\n self.assertEqual(3, wrapped_fn(a=3))\n\n def test_double_partial(self):\n expected_test_arg1 = 123\n expected_test_arg2 = 456\n\n def fn(a, test_arg1, test_arg2):\n if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:\n return ValueError('partial does not work correctly')\n return a\n\n wrapped_fn = functools.partial(fn, test_arg2=456)\n double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)\n\n self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))\n\n def test_double_partial_with_positional_args_in_outer_layer(self):\n expected_test_arg1 = 123\n expected_test_arg2 = 456\n\n def fn(test_arg1, a, test_arg2):\n if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:\n return ValueError('partial fn does not work correctly')\n return a\n\n wrapped_fn = functools.partial(fn, test_arg2=456)\n double_wrapped_fn = functools.partial(wrapped_fn, 123)\n\n self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))\n\n self.assertEqual(3, double_wrapped_fn(3)) # pylint: disable=no-value-for-parameter\n 
self.assertEqual(3, double_wrapped_fn(a=3)) # pylint: disable=no-value-for-parameter\n\n def test_double_partial_with_positional_args_in_both_layers(self):\n expected_test_arg1 = 123\n expected_test_arg2 = 456\n\n def fn(test_arg1, test_arg2, a):\n if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:\n return ValueError('partial fn does not work correctly')\n return a\n\n wrapped_fn = functools.partial(fn, 123) # binds to test_arg1\n double_wrapped_fn = functools.partial(wrapped_fn, 456) # binds to test_arg2\n\n self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))\n\n self.assertEqual(3, double_wrapped_fn(3)) # pylint: disable=no-value-for-parameter\n self.assertEqual(3, double_wrapped_fn(a=3)) # pylint: disable=no-value-for-parameter\n\n\nclass HasKwargsTest(test.TestCase):\n\n def test_simple_function(self):\n\n fn_has_kwargs = lambda **x: x\n self.assertTrue(function_utils.has_kwargs(fn_has_kwargs))\n\n fn_has_no_kwargs = lambda x: x\n self.assertFalse(function_utils.has_kwargs(fn_has_no_kwargs))\n\n def test_callable(self):\n\n class FooHasKwargs(object):\n\n def __call__(self, **x):\n del x\n self.assertTrue(function_utils.has_kwargs(FooHasKwargs()))\n\n class FooHasNoKwargs(object):\n\n def __call__(self, x):\n del x\n self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs()))\n\n def test_bound_method(self):\n\n class FooHasKwargs(object):\n\n def fn(self, **x):\n del x\n self.assertTrue(function_utils.has_kwargs(FooHasKwargs().fn))\n\n class FooHasNoKwargs(object):\n\n def fn(self, x):\n del x\n self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs().fn))\n\n def test_partial_function(self):\n expected_test_arg = 123\n\n def fn_has_kwargs(test_arg, **x):\n if test_arg != expected_test_arg:\n return ValueError('partial fn does not work correctly')\n return x\n\n wrapped_fn = functools.partial(fn_has_kwargs, test_arg=123)\n self.assertTrue(function_utils.has_kwargs(wrapped_fn))\n some_kwargs = dict(x=1, y=2, z=3)\n self.assertEqual(wrapped_fn(**some_kwargs), some_kwargs)\n\n def fn_has_no_kwargs(x, test_arg):\n if test_arg != expected_test_arg:\n return ValueError('partial fn does not work correctly')\n return x\n\n wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg=123)\n self.assertFalse(function_utils.has_kwargs(wrapped_fn))\n some_arg = 1\n self.assertEqual(wrapped_fn(some_arg), some_arg)\n\n def test_double_partial(self):\n expected_test_arg1 = 123\n expected_test_arg2 = 456\n\n def fn_has_kwargs(test_arg1, test_arg2, **x):\n if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:\n return ValueError('partial does not work correctly')\n return x\n\n wrapped_fn = functools.partial(fn_has_kwargs, test_arg2=456)\n double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)\n\n self.assertTrue(function_utils.has_kwargs(double_wrapped_fn))\n some_kwargs = dict(x=1, y=2, z=3)\n self.assertEqual(double_wrapped_fn(**some_kwargs), some_kwargs)\n\n def fn_has_no_kwargs(x, test_arg1, test_arg2):\n if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:\n return ValueError('partial does not work correctly')\n return x\n\n wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg2=456)\n double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)\n\n self.assertFalse(function_utils.has_kwargs(double_wrapped_fn))\n some_arg = 1\n self.assertEqual(double_wrapped_fn(some_arg), some_arg) # pylint: disable=no-value-for-parameter\n\n def test_raises_type_error(self):\n with self.assertRaisesRegex(TypeError,\n 
'should be a callable'):\n function_utils.has_kwargs('not a function')\n\n\nclass GetFuncNameTest(test.TestCase):\n\n def testWithSimpleFunction(self):\n self.assertEqual(\n 'silly_example_function',\n function_utils.get_func_name(silly_example_function))\n\n def testWithClassMethod(self):\n self.assertEqual(\n 'GetFuncNameTest.testWithClassMethod',\n function_utils.get_func_name(self.testWithClassMethod))\n\n def testWithCallableClass(self):\n callable_instance = SillyCallableClass()\n self.assertRegex(\n function_utils.get_func_name(callable_instance),\n '<.*SillyCallableClass.*>')\n\n def testWithFunctoolsPartial(self):\n partial = functools.partial(silly_example_function)\n self.assertRegex(\n function_utils.get_func_name(partial), '<.*functools.partial.*>')\n\n def testWithLambda(self):\n anon_fn = lambda x: x\n self.assertEqual('<lambda>', function_utils.get_func_name(anon_fn))\n\n def testRaisesWithNonCallableObject(self):\n with self.assertRaises(ValueError):\n function_utils.get_func_name(None)\n\n\nclass GetFuncCodeTest(test.TestCase):\n\n def testWithSimpleFunction(self):\n code = function_utils.get_func_code(silly_example_function)\n self.assertIsNotNone(code)\n self.assertRegex(code.co_filename, 'function_utils_test.py')\n\n def testWithClassMethod(self):\n code = function_utils.get_func_code(self.testWithClassMethod)\n self.assertIsNotNone(code)\n self.assertRegex(code.co_filename, 'function_utils_test.py')\n\n def testWithCallableClass(self):\n callable_instance = SillyCallableClass()\n code = function_utils.get_func_code(callable_instance)\n self.assertIsNotNone(code)\n self.assertRegex(code.co_filename, 'function_utils_test.py')\n\n def testWithLambda(self):\n anon_fn = lambda x: x\n code = function_utils.get_func_code(anon_fn)\n self.assertIsNotNone(code)\n self.assertRegex(code.co_filename, 'function_utils_test.py')\n\n def testWithFunctoolsPartial(self):\n partial = functools.partial(silly_example_function)\n code = function_utils.get_func_code(partial)\n self.assertIsNone(code)\n\n def testRaisesWithNonCallableObject(self):\n with self.assertRaises(ValueError):\n function_utils.get_func_code(None)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test for SaveContext.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.saved_model import save_options\n\n\nclass SaveContextTest(test.TestCase):\n\n def test_multi_thread(self):\n self.assertFalse(save_context.in_save_context())\n with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):\n save_context.get_save_options()\n\n options = save_options.SaveOptions(save_debug_info=True)\n with save_context.save_context(options):\n self.assertTrue(save_context.in_save_context())\n self.assertTrue(save_context.get_save_options().save_debug_info)\n\n entered_context_in_thread = threading.Event()\n continue_thread = threading.Event()\n\n def thread_fn():\n self.assertFalse(save_context.in_save_context())\n with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):\n save_context.get_save_options()\n\n options = save_options.SaveOptions(save_debug_info=False)\n with save_context.save_context(options):\n self.assertTrue(save_context.in_save_context())\n # save_debug_info has a different value in this thread.\n self.assertFalse(save_context.get_save_options().save_debug_info)\n entered_context_in_thread.set()\n continue_thread.wait()\n\n self.assertFalse(save_context.in_save_context())\n with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):\n save_context.get_save_options()\n\n t = threading.Thread(target=thread_fn)\n t.start()\n\n entered_context_in_thread.wait()\n # Another thread shouldn't affect this thread.\n self.assertTrue(save_context.in_save_context())\n self.assertTrue(save_context.get_save_options().save_debug_info)\n\n continue_thread.set()\n t.join()\n # Another thread exiting SaveContext shouldn't affect this thread.\n self.assertTrue(save_context.in_save_context())\n self.assertTrue(save_context.get_save_options().save_debug_info)\n\n self.assertFalse(save_context.in_save_context())\n with self.assertRaisesRegex(ValueError, 'Not in a SaveContext'):\n save_context.get_save_options()\n\n def test_enter_multiple(self):\n options = save_options.SaveOptions()\n with self.assertRaisesRegex(ValueError, 'Already in a SaveContext'):\n with save_context.save_context(options):\n with save_context.save_context(options):\n pass\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.util.tf_inspect.Parameter",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.framework.extension_type.is_packed",
"tensorflow.python.framework.type_spec.lookup",
"tensorflow.python.framework.extension_type.AnonymousExtensionType",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.framework.extension_type.pack",
"tensorflow.python.util.tf_inspect.signature",
"tensorflow.python.util.dispatch.dispatch_for_types",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.type_spec._TYPE_SPEC_TO_NAME.pop",
"tensorflow.python.platform.test.get_temp_dir",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.control_flow_ops.while_loop_v2",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.framework.extension_type_field.ExtensionTypeField",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.framework.extension_type.unpack",
"tensorflow.python.framework.type_spec.register",
"tensorflow.python.framework.extension_type.reinterpret",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.extension_type.ExtensionType",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.module.module.Module",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.type_spec._NAME_TO_TYPE_SPEC.pop",
"tensorflow.python.util.tf_inspect.Signature",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.def_function.function",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.lite.python.interpreter.Interpreter",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.data.experimental.ops.distribute._AutoShardDataset",
"tensorflow.python.data.util.traverse.obtain_all_variant_tensor_ops",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.op_def_registry.get",
"tensorflow.python.data.experimental.ops.distribute._AutoShardDatasetV1",
"tensorflow.python.data.ops.dataset_ops._VariantDataset"
],
[
"tensorflow.python.util.compat.as_str_any",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.saved_model.function_serialization.wrap_cached_variables",
"tensorflow.python.framework.tensor_spec.TensorSpec.from_tensor",
"tensorflow.python.util.compat.as_str",
"tensorflow.python.util.nest.flatten"
],
[
"tensorflow.python.util.function_utils.has_kwargs",
"tensorflow.python.util.function_utils.get_func_code",
"tensorflow.python.platform.test.main",
"tensorflow.python.util.function_utils.fn_args",
"tensorflow.python.util.function_utils.get_func_name"
],
[
"tensorflow.python.eager.test.main",
"tensorflow.python.saved_model.save_context.save_context",
"tensorflow.python.saved_model.save_context.get_save_options",
"tensorflow.python.saved_model.save_context.in_save_context",
"tensorflow.python.saved_model.save_options.SaveOptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"2.7",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.6",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.4",
"2.9",
"2.5",
"2.6",
"2.10"
]
}
] |
mavroudisv/acme | [
"3eb4d55a67ea460710ba9e2b2ecf1aa339ba7d2d",
"3eb4d55a67ea460710ba9e2b2ecf1aa339ba7d2d"
] | [
"acme/agents/tf/d4pg/agent_test.py",
"acme/adders/reverb/episode.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the D4PG agent.\"\"\"\n\nimport sys\nfrom typing import Dict, Sequence\n\nfrom absl.testing import absltest\nimport acme\nfrom acme import specs\nfrom acme import types\nfrom acme.agents.tf import d4pg\nfrom acme.testing import fakes\nfrom acme.tf import networks\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\n\ndef make_networks(\n action_spec: types.NestedSpec,\n policy_layer_sizes: Sequence[int] = (10, 10),\n critic_layer_sizes: Sequence[int] = (10, 10),\n vmin: float = -150.,\n vmax: float = 150.,\n num_atoms: int = 51,\n) -> Dict[str, snt.Module]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n policy_layer_sizes = list(policy_layer_sizes) + [num_dimensions]\n\n policy_network = snt.Sequential(\n [networks.LayerNormMLP(policy_layer_sizes), tf.tanh])\n critic_network = snt.Sequential([\n networks.CriticMultiplexer(\n critic_network=networks.LayerNormMLP(\n critic_layer_sizes, activate_final=True)),\n networks.DiscreteValuedHead(vmin, vmax, num_atoms)\n ])\n\n return {\n 'policy': policy_network,\n 'critic': critic_network,\n }\n\n\nclass D4PGTest(absltest.TestCase):\n\n def test_d4pg(self):\n # Create a fake environment to test with.\n environment = fakes.ContinuousEnvironment(episode_length=10, bounded=True)\n spec = specs.make_environment_spec(environment)\n\n # Create the networks.\n agent_networks = make_networks(spec.actions)\n\n # Construct the agent.\n agent = d4pg.D4PG(\n environment_spec=spec,\n policy_network=agent_networks['policy'],\n critic_network=agent_networks['critic'],\n batch_size=10,\n samples_per_insert=2,\n min_replay_size=10,\n )\n\n # Try running the environment loop. We have no assertions here because all\n # we care about is that the agent runs without raising any errors.\n loop = acme.EnvironmentLoop(environment, agent)\n loop.run(num_episodes=2)\n # Imports check\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Episode adders.\n\nThis implements full episode adders, potentially with padding.\n\"\"\"\n\nfrom typing import Callable, Optional, Iterable, Tuple\n\nfrom acme import specs\nfrom acme import types\nfrom acme.adders.reverb import base\nfrom acme.adders.reverb import utils\n\nimport dm_env\nimport numpy as np\nimport reverb\nimport tensorflow as tf\nimport tree\n\n_PaddingFn = Callable[[Tuple[int, ...], np.dtype], np.ndarray]\n\n\nclass EpisodeAdder(base.ReverbAdder):\n \"\"\"Adder which adds entire episodes as trajectories.\"\"\"\n\n def __init__(\n self,\n client: reverb.Client,\n max_sequence_length: int,\n delta_encoded: bool = False,\n priority_fns: Optional[base.PriorityFnMapping] = None,\n max_in_flight_items: int = 1,\n padding_fn: Optional[_PaddingFn] = None,\n # Deprecated kwargs.\n chunk_length: Optional[int] = None,\n ):\n del chunk_length\n\n super().__init__(\n client=client,\n max_sequence_length=max_sequence_length,\n delta_encoded=delta_encoded,\n priority_fns=priority_fns,\n max_in_flight_items=max_in_flight_items,\n )\n self._padding_fn = padding_fn\n\n def add(\n self,\n action: types.NestedArray,\n next_timestep: dm_env.TimeStep,\n extras: types.NestedArray = (),\n ):\n if self._writer.episode_steps >= self._max_sequence_length - 1:\n raise ValueError(\n 'The number of observations within the same episode will exceed '\n 'max_sequence_length with the addition of this transition.')\n\n super().add(action, next_timestep, extras)\n\n def _write(self):\n # This adder only writes at the end of the episode, see _write_last()\n pass\n\n def _write_last(self):\n if self._padding_fn is not None and self._writer.episode_steps < self._max_sequence_length:\n history = self._writer.history\n padding_step = dict(\n observation=history['observation'],\n action=history['action'],\n reward=history['reward'],\n discount=history['discount'],\n extras=history.get('extras', ()))\n # Get shapes and dtypes from the last element.\n padding_step = tree.map_structure(\n lambda col: self._padding_fn(col[-1].shape, col[-1].dtype),\n padding_step)\n padding_step['start_of_episode'] = False\n while self._writer.episode_steps < self._max_sequence_length:\n self._writer.append(padding_step)\n\n trajectory = tree.map_structure(lambda x: x[:], self._writer.history)\n\n # Pack the history into a base.Step structure and get numpy converted\n # variant for priotiy computation.\n trajectory = base.Trajectory(**trajectory)\n\n # Calculate the priority for this episode.\n table_priorities = utils.calculate_priorities(self._priority_fns,\n trajectory)\n\n # Create a prioritized item for each table.\n for table_name, priority in table_priorities.items():\n self._writer.create_item(table_name, priority, trajectory)\n self._writer.flush(self._max_in_flight_items)\n\n # TODO(b/185309817): make this into a standalone method.\n @classmethod\n def signature(cls,\n environment_spec: 
specs.EnvironmentSpec,\n extras_spec: types.NestedSpec = (),\n sequence_length: Optional[int] = None):\n \"\"\"This is a helper method for generating signatures for Reverb tables.\n\n Signatures are useful for validating data types and shapes, see Reverb's\n documentation for details on how they are used.\n\n Args:\n environment_spec: A `specs.EnvironmentSpec` whose fields are nested\n structures with leaf nodes that have `.shape` and `.dtype` attributes.\n This should come from the environment that will be used to generate the\n data inserted into the Reverb table.\n extras_spec: A nested structure with leaf nodes that have `.shape` and\n `.dtype` attributes. The structure (and shapes/dtypes) of this must be\n the same as the `extras` passed into `ReverbAdder.add`.\n sequence_length: An optional integer representing the expected length of\n sequences that will be added to replay.\n\n Returns:\n A `Step` whose leaf nodes are `tf.TensorSpec` objects.\n \"\"\"\n\n def add_time_dim(paths: Iterable[str], spec: tf.TensorSpec):\n return tf.TensorSpec(\n shape=(sequence_length, *spec.shape),\n dtype=spec.dtype,\n name='/'.join(str(p) for p in paths))\n\n trajectory_env_spec, trajectory_extras_spec = tree.map_structure_with_path(\n add_time_dim, (environment_spec, extras_spec))\n\n trajectory_spec = base.Trajectory(\n *trajectory_env_spec,\n start_of_episode=tf.TensorSpec(\n shape=(sequence_length,), dtype=tf.bool, name='start_of_episode'),\n extras=trajectory_extras_spec)\n\n return trajectory_spec\n"
] | [
[
"numpy.prod"
],
[
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
acgtcoder/lcdblib | [
"a3e5c81b841f0a06e63641f1cbcc24fc207f40f0"
] | [
"lcdblib/parse/rseqc.py"
] | [
"import pandas as pd\nimport re\nfrom collections import OrderedDict\n\ndef parse_inferExperiment(sample, file):\n \"\"\"Parse rseqc infer expeirment.\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n parsed = OrderedDict()\n for l in fh:\n fqs = re.search(r\"^(.+?):\\s+([\\d\\.]+)$\", l)\n if fqs:\n parsed[fqs.group(1)] = float(fqs.group(2))\n\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_geneBodyCoverage(sample, file):\n \"\"\"Parse rseqc genebody coverage.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n lines = fh.readlines()\n header = lines[0].strip().split('\\t')[1:]\n values = lines[1].strip().split('\\t')[1:]\n parsed = OrderedDict()\n for k, v in zip(header, values):\n parsed[int(k)] = float(v)\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_bamStat(sample, file):\n \"\"\"Parse rseqc bam stat.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n parsed = OrderedDict()\n for l in fh:\n fqs = re.search(r\"^(.+?):\\s*(\\d+)$\", l)\n if fqs:\n parsed[fqs.group(1)] = int(fqs.group(2))\n\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n\n\ndef parse_tin(sample, file):\n \"\"\"Parse rseqc tin.\n\n Parameters\n ----------\n sample: str\n Sample name which will be added as row index.\n file: str\n Path to the fastqc zip file.\n\n \"\"\"\n with open(file, 'r') as fh:\n lines = fh.readlines()\n header = lines[0].strip().split('\\t')[1:]\n values = lines[1].strip().split('\\t')[1:]\n parsed = OrderedDict()\n for k, v in zip(header, values):\n parsed[k] = float(v)\n if len(parsed) == 0:\n return None\n else:\n return pd.DataFrame(parsed, index=[sample])\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
msarahan/ml_smoketest | [
"b7dbebb99b571b4af04bdaa7513817b14c10f63f"
] | [
"net_surgery.py"
] | [
"\n# coding: utf-8\n\nimport os\nimport subprocess\nimport numpy as np\nimport caffe\n\n\ndef main():\n if os.path.isfile('../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):\n print('CaffeNet found.')\n else:\n print('Downloading pre-trained CaffeNet model...')\n subprocess.call(['python', \"../scripts/download_model_binary.py\",\n \"../models/bvlc_reference_caffenet\"])\n # Load the net, list its data and params, and filter an example image.\n caffe.set_mode_cpu()\n net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)\n print(\"blobs {}\\nparams {}\".format(net.blobs.keys(), net.params.keys()))\n\n # load image and prepare as a single input batch for Caffe\n im = np.array(caffe.io.load_image('images/cat_gray.jpg', color=False)).squeeze()\n\n im_input = im[np.newaxis, np.newaxis, :, :]\n net.blobs['data'].reshape(*im_input.shape)\n net.blobs['data'].data[...] = im_input\n\n # helper show filter outputs\n def show_filters(net):\n net.forward()\n filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()\n\n # filter the image with initial\n show_filters(net)\n\n # pick first filter output\n conv0 = net.blobs['conv'].data[0, 0]\n print(\"pre-surgery output mean {:.2f}\".format(conv0.mean()))\n # set first filter bias to 1\n net.params['conv'][1].data[0] = 1.\n net.forward()\n print(\"post-surgery output mean {:.2f}\".format(conv0.mean()))\n\n ksize = net.params['conv'][0].data.shape[2:]\n # make Gaussian blur\n sigma = 1.\n y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]\n g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))\n gaussian = (g / g.sum()).astype(np.float32)\n net.params['conv'][0].data[0] = gaussian\n # make Sobel operator for edge detection\n net.params['conv'][0].data[1:] = 0.\n sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))\n net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal\n net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical\n show_filters(net)\n\n # Load the original network and extract the fully connected layers' parameters.\n net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt',\n '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n caffe.TEST)\n params = ['fc6', 'fc7', 'fc8']\n # fc_params = {name: (weights, biases)}\n fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}\n\n for fc in params:\n print('{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape))\n\n # Load the fully convolutional network to transplant the parameters.\n net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt',\n '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',\n caffe.TEST)\n params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']\n # conv_params = {name: (weights, biases)}\n conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}\n\n for conv in params_full_conv:\n print('{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape))\n\n for pr, pr_conv in zip(params, params_full_conv):\n conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays\n conv_params[pr_conv][1][...] 
= fc_params[pr][1]\n\n\n net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')\n\n # load input and configure preprocessing\n im = caffe.io.load_image('images/cat.jpg')\n transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape})\n transformer.set_mean('data', np.load('../python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))\n transformer.set_transpose('data', (2,0,1))\n transformer.set_channel_swap('data', (2,1,0))\n transformer.set_raw_scale('data', 255.0)\n # make classification map by forward and print prediction indices at each location\n out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)]))\n print(out['prob'][0].argmax(axis=0))\n # show net input and confidence map (probability of the top prediction at each location)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.array",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
asamadiya/onnxruntime | [
"6b3645d97ab222d28bd515f4990af8868194eb52"
] | [
"onnxruntime/test/python/quantization/test_op_concat.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport unittest\nimport numpy as np\nfrom onnx import helper, TensorProto, numpy_helper, save\nfrom onnxruntime.quantization import quantize_static, QuantFormat\nfrom op_test_utils import InputFeedsNegOneZeroOne, check_model_correctness, check_op_type_count\n\n\nclass TestONNXModel(unittest.TestCase):\n def construct_model(self, model_path):\n # (input)\n # / | \\\n # / | \\\n # / | \\\n # / | \\\n # Conv(1) Conv(2) conv(3)\n # \\ | /\n # \\ | /\n # \\ | /\n # Concat\n # |\n # Identity\n # |\n # (output)\n initializers = []\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 15, 15])\n output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 13, 13, 13])\n\n # Conv1 output [1, 2, 13, 13]\n conv1_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [2, 3, 3, 3]).astype(np.float32), name='conv1_weight')\n conv1_node = helper.make_node('Conv', ['input', 'conv1_weight'], ['conv1_output'], name='conv1_node')\n\n # Conv2 output [1, 5, 13, 13]\n conv2_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [5, 3, 3, 3]).astype(np.float32), name='conv2_weight')\n conv2_node = helper.make_node('Conv', ['input', 'conv2_weight'], ['conv2_output'], name='conv2_node')\n\n # Conv3 output [1, 6, 13, 13]\n conv3_weight_initializer = numpy_helper.from_array(\n np.random.randint(-1, 2, [6, 3, 3, 3]).astype(np.float32), name='conv3_weight')\n conv3_node = helper.make_node('Conv', ['input', 'conv3_weight'], ['conv3_output'], name='conv3_node')\n\n concat_node = helper.make_node('Concat', ['conv1_output', 'conv2_output', 'conv3_output'], [\n 'concat_output'], name='concat_node', axis=1)\n\n identity_node = helper.make_node('Identity', ['concat_output'], ['output'], name='identity_node')\n\n initializers = [conv1_weight_initializer, conv2_weight_initializer, conv3_weight_initializer]\n graph = helper.make_graph([conv1_node, conv2_node, conv3_node, concat_node, identity_node],\n 'qlinear_concat_op_test', [input], [output], initializer=initializers)\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n save(model, model_path)\n\n def test_quantize_concat(self):\n np.random.seed(1)\n\n model_fp32_path = 'concat_fp32.onnx'\n model_uint8_path = 'concat_uint8.onnx'\n model_uint8_qdq_path = 'concat_uint8_qdq.onnx'\n\n self.construct_model(model_fp32_path)\n\n # Verify QOperator mode\n data_reader = InputFeedsNegOneZeroOne(1, {'input': [1, 3, 15, 15]})\n quantize_static(model_fp32_path, model_uint8_path, data_reader)\n\n qnode_counts = {'QLinearConv': 3, 'QuantizeLinear': 1, 'DequantizeLinear': 1, 'QLinearConcat': 1}\n check_op_type_count(self, model_uint8_path, **qnode_counts)\n data_reader.rewind()\n check_model_correctness(self, model_fp32_path, model_uint8_path, data_reader.get_next())\n\n # Verify QDQ mode\n data_reader.rewind()\n quantize_static(model_fp32_path, model_uint8_qdq_path, data_reader, quant_format=QuantFormat.QDQ)\n qdqnode_counts = {'Conv': 3, 'QuantizeLinear': 5, 'DequantizeLinear': 8, 'Concat': 1}\n check_op_type_count(self, model_uint8_qdq_path, **qdqnode_counts)\n data_reader.rewind()\n check_model_correctness(self, model_fp32_path, model_uint8_qdq_path, 
data_reader.get_next())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.seed",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HarmanDotpy/GeNeVA | [
"26042826d37206cc9ccd9fbeee5bfcae95dda5a6"
] | [
"geneva/models/image_encoder.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Image encoder using ResBlocks\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom geneva.definitions.res_blocks import ResDownBlock\n\n\nclass ImageEncoder(nn.Module):\n def __init__(self, cfg):\n \"\"\"Encodes Image to 16x16 features maps of depth 256\n , return the 16x16 features as well as the global sum\n pooled features(shape=512)\"\"\"\n super().__init__()\n self.encode_image = cfg.use_fg\n\n if self.encode_image:\n if cfg.img_encoder_type == 'res_blocks':\n self.image_encoder = nn.Sequential(\n # 3 x 128 x 128\n ResDownBlock(3, 64, downsample=True,\n use_spectral_norm=False),\n # 64 x 64 x 64\n nn.BatchNorm2d(64),\n ResDownBlock(64, 128, downsample=True,\n use_spectral_norm=False),\n # 128 x 32 x 32\n nn.BatchNorm2d(128),\n ResDownBlock(128, cfg.image_feat_dim,\n downsample=True,\n use_spectral_norm=False),\n nn.BatchNorm2d(cfg.image_feat_dim),\n # 256 x 16 x 16\n )\n elif cfg.img_encoder_type == 'conv':\n self.image_encoder = nn.Sequential(\n nn.Conv2d(3, 64, 4, 2, 1, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(64, 128, 4, 2, 1, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.Conv2d(128, cfg.image_feat_dim, 4, 2, 1,\n bias=False),\n nn.BatchNorm2d(cfg.image_feat_dim),\n )\n\n self.object_detector = nn.Linear(cfg.image_feat_dim,\n cfg.num_objects)\n\n self.cfg = cfg\n\n def forward(self, img):\n if not self.encode_image:\n return None, None, None\n\n image_features = self.image_encoder(img)\n pooled_features = torch.sum(image_features, dim=(2, 3))\n\n object_detections = F.sigmoid(self.object_detector(pooled_features))\n return image_features, pooled_features, object_detections\n"
] | [
[
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evenlwanvik/TTK-4900-Master | [
"172b444c44f65941ea162c64bc917924fc8e996b"
] | [
"src/training_data.py"
] | [
"from training_data.eddies import eddy_detection,dataframe_eddies,plot_eddies,julianh2gregorian\nfrom tools.machine_learning import sliding_window\nfrom matplotlib.patches import Rectangle\nfrom tools.load_nc import load_netcdf4\nfrom numpy import savez_compressed\nimport matplotlib.pyplot as plt\nfrom tools.bfs import bfs\nfrom datetime import date\nfrom math import cos, pi\nfrom operator import eq\nimport tools.dim as dim\nfrom tools import gui\nimport xarray as xr\nimport numpy as np\nimport itertools\nimport argparse\nimport datetime\nimport logging\nimport random\nimport cv2\nimport os\nimport io\nimport sys\n\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom keras.models import load_model\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport pdb\n\nargp = argparse.ArgumentParser()\nargp.add_argument(\"-fd\", \"--fDir\", default='C:/Master/data/cmems_data/global_10km/', help=\"CMEMS grid data directory path\")\n#argp.add_argument(\"-fd\", \"--fDir\", default='D:/Master/data/cmems_data/global_10km/', help=\"CMEMS grid data directory path\")\nargp.add_argument(\"-rs\", \"--size\", default=1.3, help=\"rectangular patche size multiplier\")\nargp.add_argument(\"-sd\", \"--savedir\", default='C:/Master/TTK-4900-Master/data/', help=\"training data save dir\")\n#argp.add_argument(\"-sd\", \"--savedir\", default='D:/Master/TTK-4900-Master/data/', help=\"training data save dir\")\nargs = argp.parse_args()\n\nlogPath = f\"{os.path.dirname(os.path.realpath(__file__))}/training_data/log\"\nlogName = f\"{datetime.datetime.now().strftime('%d%m%Y_%H%M')}.log\"\n\nif not os.path.exists(logPath):\n os.makedirs(logPath)\n\n# create logger \nlogger = logging.getLogger(\"Training Data\")\nlogger.setLevel(logging.INFO)\n# create file handler \nfh = logging.FileHandler(\"{0}/{1}\".format(logPath, logName))\nfh.setLevel(logging.INFO)\n# create console handler\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n# create formatter and add it to the handlers\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\ndef lon2km(lon, lat):\n \"\"\" Convert from longitudinal displacement to km \"\"\"\n return lon * 111.320e3 * cos(lat)\n \n\ndef lat2km(lat):\n \"\"\" Convert from latitudinal displacement to km \"\"\"\n return 110.54e3 * lat\n\n\ndef index_list(ncols,nrows):\n \"\"\"\" Create an array of dimension nrows x ncols with indeces as values \"\"\"\n return [[(i,j) for j in range(nrows)] for i in range(ncols)]\n\n\ndef random_grids(arr, nOut):\n \"\"\" Get nOut random grids from arr \"\"\"\n nTot = dim.shape(arr)[0]\n x = random.sample(range(nTot), nOut)\n return [ arr[i] for i in x ]\n\n\ndef create_subgrids(arr, nrows, ncols, inner=1):\n \"\"\"\n Return an array of shape (n, nrows, ncols) where\n n * nrows * ncols = arr.size\n If arr is a 2D array, the returned array should look like n subblocks with\n each subblock preserving the \"physical\" layout of arr.\n 'inner' tells the dimension of the array elements, i.e. 
2 if tuple, 1 if single element\n \"\"\"\n h, w = dim.shape(arr)[0:2]\n arr = np.array(arr)\n assert h % nrows == 0, \"{} rows is not evenly divisble by {}\".format(h, nrows)\n assert w % ncols == 0, \"{} cols is not evenly divisble by {}\".format(w, ncols)\n return (arr.reshape(h//nrows, nrows, -1, ncols, inner)# last 2 is because array consists of 2d idx\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols, inner)) \n\n\ndef plot_grids(data, lon, lat, larger_grid=None, title=\"__\"):\n #\"quickscript\" to plot and investigate images\n\n shape = data[1].shape\n # needs to be larger than 2x2\n if shape[0] < 2 or shape[1] < 2: \n return 'No'\n\n fig, axs = plt.subplots(2, 2, figsize=(12, 8))\n\n # levels for the phase angle to make it not interpolate \n levels = MaxNLocator(nbins=10).tick_values(data[4].min(), data[4].max())\n cmap = plt.get_cmap('CMRmap')\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n axs[0,0].pcolormesh(lon, lat, data[4].T, cmap=cmap, norm=norm)\n\n axs[0,1].contourf(lon, lat, data[1].T, 20, cmap='rainbow')\n n=-1\n color_array = np.sqrt(((data[2]-n)/2)**2 + ((data[3]-n)/2)**2)\n axs[1,0].quiver(lon, lat, data[2].T, data[3].T, color_array, scale=3) # Plot vector field\n if larger_grid is not None:\n axs[1,1].contourf(larger_grid[0], larger_grid[1], larger_grid[2].T, 20, cmap='rainbow') # show a larger parcel to analyze the surroundings\n #axs[1,2].contourf(lon, lat, data[5].T, 10) # Or plot the OW values\n \n fig.suptitle(title, fontsize=16)\n\n guiEvent, guiValues = gui.show_figure(fig)\n plt.close(fig)\n\n return guiEvent\n\n\ndef eddy_metrics(eddies_ma, centerIdxs, lon, lat):\n \"\"\" \n Finds metrics such as index centroid and diameter of eddy \n\n Parameters:\n ----------\n eddies_ma : masked array\n masked array received from the OW-R2 eddy_detection algorithm\n centereIdxs : tuple\n tuple with (lon,lat) indeces of the center coordinates of eddy\n \n returns:\n ----------\n float: diameter of the eddy\n tuple: (lon,lat) index of center\n \"\"\"\n start = centerIdxs[0], centerIdxs[1]\n neighbors = (-1, 0), (0, +1), (+1, 0), (0, -1) # possible neighbors\n similar = eq # Eq method test the equality of the values.\n\n # Run BFS to find the indexes in eddy from the masked array\n eddyIdxs = np.array( list( bfs(eddies_ma, neighbors, start, similar) ) )\n\n # Find center lon/lat index of eddy\n lonCtrIdx = int( eddyIdxs[:,0].mean() )\n latCtrIdx = int( eddyIdxs[:,1].mean() )\n\n # Find the displacement in lon/lat direction in km, and use the largest as diameter of eddy.\n lonDiameter_km = lon2km( eddyIdxs[:,0].max()-eddyIdxs[:,0].min(), lat[latCtrIdx]) * 0.083 \n latDiameter_km = lat2km( eddyIdxs[:,1].max()-eddyIdxs[:,1].min() ) * 0.083 # 0.083 degrees resolution per index\n\n largest_diameter_km = np.max([lonDiameter_km, latDiameter_km])\n\n return largest_diameter_km, (lonCtrIdx, latCtrIdx)\n\n\ndef check_cyclone(flag):\n # positive 1 denotes positive rotation (counter-clockwise), which is a cyclone in the norther hemisphere\n if flag==1: return \"cyclone\"\n elif flag==-1: return \"anticyclone\"\n else: return \"nothing\"\n\n\ndef save_npz_array(ds, savedir=args.savedir):\n # If folder doesn't exist, create folder and just save the data for the first day\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n savez_compressed( f'{savedir}/sst_train.npz', ds[0])\n savez_compressed( f'{savedir}/ssl_train.npz', ds[1])\n savez_compressed( f'{savedir}/uvel_train.npz', ds[2])\n savez_compressed( f'{savedir}/vvel_train.npz', ds[3])\n savez_compressed( 
f'{savedir}/phase_train.npz', ds[4])\n # If not, we open and append to the existing data\n else:\n with np.load(f'{savedir}/sst_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed( f'{savedir}/sst_train.npz', np.append(data['arr_0'], ds[0], axis=0))\n with np.load(f'{savedir}/ssl_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed( f'{savedir}/ssl_train.npz', np.append(data['arr_0'], ds[1], axis=0))\n with np.load(f'{savedir}/uvel_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/uvel_train.npz', np.append(data['arr_0'], ds[2], axis=0))\n with np.load(f'{savedir}/vvel_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/vvel_train.npz', np.append(data['arr_0'], ds[3], axis=0))\n with np.load(f'{savedir}/phase_train.npz', 'w+', allow_pickle=True) as data:\n savez_compressed(f'{savedir}/phase_train.npz', np.append(data['arr_0'], ds[4], axis=0))\n\n\ndef semi_automatic_training():\n \"\"\" This appplication lets you maneuver through windows selected by the OW process.\n In short: OW below the OW_start threshold, which passes the R2_cirterion are considered eddies,\n have a look at https://github.com/JASaa/eddies-R2 for more info about the sample selection process\"\"\"\n\n # Loop through every netcdf file in directory, usually they are spaced by 5 days\n for fName in os.listdir(args.fDir):\n \n if not fName.endswith(\".nc\"):\n continue\n \n logger.info(\"loading netcdf\")\n\n # load data\n (ds,t,lon,lat,depth,uvel_full,vvel_full,sst_full,ssl_full) = load_netcdf4(args.fDir + fName)\n\n # Confidence level, usually 90%\n R2_criterion = 0.90\n\n # OW value at which to begin the evaluation of R2, default was -1, want to use -8 to be absolutely sure\n OW_start = -6.0\n\n # Number of local minima to evaluate using R2 method.\n # Set low (like 20) to see a few R2 eddies quickly.\n # Set high (like 1e5) to find all eddies in domain.\n max_evaluation_points = 100000 \n\n # Minimum number of cells required to be identified as an eddie.\n min_eddie_cells = 3 # set to 3 to be coherent with the use of the R2 method, 3 points seems like a reasonable minimun for a correlation \n\n # z-level to plot. 
Usually set to 0 for the surface.\n k_plot = 0\n\n dlon = abs(lon[0]-lon[1])\n dlat = abs(lat[0]-lat[1])\n\n # Create eddy images for each day in datase\n #for day, time in enumerate(t):\n # Shuffle the time so that the expert won't see the same long-lasting eddies\n\n for day in random.sample(range(0, len(t)), len(t)): \n\n dateStr = \"{:%d-%m-%Y}\".format(datetime.date(1950, 1, 1) + datetime.timedelta(hours=float(t[day])) )\n logger.info(f\"Creating images for dataset {dateStr}\")\n\n # create a text trap\n text_trap = io.StringIO()\n sys.stdout = text_trap\n\n # Run the OW-R2 algorithm\n lon,lat,u,v,vorticity,OW,OW_eddies,eddie_census,nEddies,circulation_mask = eddy_detection(\n lon,lat,depth,uvel_full,vvel_full,day,R2_criterion,OW_start,max_evaluation_points,min_eddie_cells)\n\n # restore stdout\n sys.stdout = sys.__stdout__\n\n sst_train = []\n ssl_train = []\n uvel_train = []\n vvel_train = []\n phase_train = []\n nDataset = 5\n\n # =========================================================\n # ============== Prepare datasets and lists ===============\n # =========================================================\n\n eddyCtrIdx = []\n for i in range(0,nEddies):\n lonIdx = np.argmax(lon>eddie_census[2,i])-1\n latIdx = np.argmax(lat>eddie_census[3,i])-1\n eddyCtrIdx.append( (lonIdx, latIdx) )\n\n # Netcdf uses (lat,lon) we want to use (lon,lat) and discard the depth\n sst = sst_full[day,:,:].T\n ssl = ssl_full[day,:,:].T\n uvel = uvel_full[day,0,:,:].T\n vvel = vvel_full[day,0,:,:].T\n # Calculate the phase angle (direction) of the current\n with np.errstate(all='ignore'): # Disable zero div warning\n phase = xr.ufuncs.rad2deg( xr.ufuncs.arctan2(vvel, uvel) ) + 180\n OW = OW[:,:,0]\n nLon = len(lon)\n nLat = len(lat)\n\n datasets = (sst, ssl, uvel, vvel, phase, OW) \n \n # =========================================================\n # ======= Create rectangular patches around eddies ========\n # =========================================================\n\n logger.info(f\"+ Creating rectangles for {nEddies} eddies\")\n\n savedImgCounter = 0 # saved image counter for file ID\n for eddyId, ctrIdx in enumerate(eddyCtrIdx): # nEddies\n\n ctrCoord = lon[ctrIdx[0]], lat[ctrIdx[1]]\n diameter_km = eddie_census[5][eddyId]\n\n bfs_diameter_km, bfs_center = eddy_metrics(OW_eddies, ctrIdx, lon, lat)\n\n # Positive rotation (counter-clockwise) is a cyclone in the northern hemisphere because of the coriolis effect\n if (eddie_census[1][eddyId] > 0.0): cyclone = 1 # 1 is a cyclone, 0 is nothing and -1 is anti-cyclone (negative rotation)\n else: cyclone = -1\n\n logger.info(f\"+++ Creating rectangles for {check_cyclone(cyclone)} with center {ctrCoord} and diameter {diameter_km}\")\n \n # Find rectangle metrics\n height = args.size * abs(diameter_km / 110.54) # 1 deg = 110.54 km, 1.2 to be sure the image covers the eddy\n width = args.size * abs(diameter_km / (111.320 * cos(lat[ctrIdx[1]]))) # 1 deg = 111.320*cos(latitude) km, using center latitude as ref\n\n lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0\n lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0\n \n # Indeces of current eddy image\n lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]\n latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]\n\n eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)])\n \n # Plot and flag to save eddy\n #add = plot_grids(eddy_data, lo, la, title)\n\n #-------- Move closer to center of eddy ------------\n\n title = dateStr + \"_\" + 
check_cyclone(cyclone)\n\n choices = ('Center', 'incLon', 'incLat', 'decLon', 'decLat')\n response = 'Center'\n #response = 'Yes' # Skip this section for debugging non-eddy section\n while response in choices:\n\n lo = lon[lonIdxs]\n la = lat[latIdxs]\n\n for i, loIdx in enumerate(lonIdxs):\n for j, laIdx in enumerate(latIdxs):\n for k, measurement in enumerate(datasets): # for every measurement type in datasets\n eddy_data[k,i,j] = measurement[loIdx,laIdx]\n\n # Store a larger grid to make it easier to see if we have an eddy and if we should center image \n if (lonIdxs[0]-5 < 0 or lonIdxs[-1]+5 >= nLon) or (latIdxs[0]-3 < 0 or latIdxs[-1]+3 >= nLat):\n larger_grid = None\n else:\n larger_grid = [ np.zeros(lonIdxs.size+10), np.zeros(latIdxs.size+6), \n np.zeros((lonIdxs.size+10,latIdxs.size+6)), ]\n for i, loIdx in enumerate(range(lonIdxs[0]-5, lonIdxs[-1]+6)):\n for j, laIdx in enumerate(range(latIdxs[0]-3, latIdxs[-1]+4)):\n larger_grid[0][i] = lon[loIdx]\n larger_grid[1][j] = lat[laIdx]\n larger_grid[2][i,j] = ssl[loIdx,laIdx]\n\n response = plot_grids(eddy_data, lo, la, larger_grid, title)\n if response not in choices: # TODO: feel like this is a silly way of doing this\n break\n if response == 'Center':\n # Find the center from water level\n logger.info(f\"+++ Centering eddy towards a minima/maxima depending on eddy type\")\n if cyclone==1:\n idx = np.unravel_index(eddy_data[1].argmax(), eddy_data[1].shape)\n ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]\n logger.info(f\"+++ Argmax center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}\")\n else:\n idx = np.unravel_index(eddy_data[1].argmin(), eddy_data[1].shape)\n ctrCoord = lon[lonIdxs[idx[0]]], lat[latIdxs[idx[1]]]\n logger.info(f\"+++ Argmin center -> lon: {ctrCoord[0]}, Center lat: {ctrCoord[1]}\")\n\n # New width and height in case we've moved in lon/lat direction\n width, height = abs(lo[0]-lo[-1])+dlon, abs(la[0]-la[-1])+dlat\n\n lon_bnds = ctrCoord[0]-width/2.0, ctrCoord[0]+width/2.0\n lat_bnds = ctrCoord[1]-height/2.0, ctrCoord[1]+height/2.0\n\n # Indeces of current eddy image\n lonIdxs = np.where((lon >= lon_bnds[0]) & (lon <= lon_bnds[1]))[0]\n latIdxs = np.where((lat >= lat_bnds[0]) & (lat <= lat_bnds[1]))[0]\n\n elif response == 'incLon':\n if (lonIdxs[0] <= 0 or lonIdxs[-1] >= nLon-1): \n logger.info(f\"+++ Longitude can't be increased further\")\n else:\n lonIdxs = np.arange(lonIdxs[0]-1, lonIdxs[-1]+2)\n logger.info(f\"+++ Increasing lontitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})\")\n elif response == 'incLat':\n if (latIdxs[0] <= 0 or latIdxs[-1] >= nLat-1): \n logger.info(f\"+++ Latitude can't be increased further\")\n else:\n latIdxs = np.arange(latIdxs[0]-1, latIdxs[-1]+2)\n logger.info(f\"+++ Increasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})\")\n elif response == 'decLon':\n lonIdxs = np.arange(lonIdxs[0]+1, lonIdxs[-1])\n logger.info(f\"+++ Decreasing lontitude by 1 cell in both directions to ({lonIdxs[0]}:{lonIdxs[-1]})\")\n elif response == 'decLat':\n latIdxs = np.arange(latIdxs[0]+1, latIdxs[-1])\n logger.info(f\"+++ Decreasing latitude by 1 cell in both directions to ({latIdxs[0]}:{latIdxs[-1]})\")\n eddy_data = np.array([np.zeros((lonIdxs.size,latIdxs.size)) for _ in range(6)]) \n\n #----------------------------------------------------------\n \n lo = lon[lonIdxs]\n la = lat[latIdxs]\n\n #guiEvent, guiValues = show_figure(fig)\n #add = 'Yes' # Bypass GUI selection\n if response=='Yes':\n savedImgCounter = savedImgCounter + 1\n # Create 
images?\n '''\n dirPath = 'C:/Master/TTK-4900-Master/images/'+dateStr+'/'\n if not os.path.exists(dirPath):\n os.makedirs(dirPath)\n imPath = dirPath + title + f\"_{savedImgCounter}.png\" \n plt.savefig(imPath, bbox_inches='tight')\n '''\n\n sst_train.append([eddy_data[0], cyclone]) # [data, label]\n ssl_train.append([eddy_data[1], cyclone]) \n uvel_train.append([eddy_data[2], cyclone]) \n vvel_train.append([eddy_data[3], cyclone]) \n phase_train.append([eddy_data[4], cyclone]) \n\n logger.info(f\"+++++ Saving image {eddyId} as an eddy\") \n\n else: \n logger.info(f\"+++++ Discarding image {eddyId}\")\n \n # =========================================================\n # ================ Select non-eddy images =================\n # =========================================================\n\n if savedImgCounter <= 0:\n logger.info(f\"+++++ No eddies found\")\n continue \n\n # Subgrid (sg) longitude and latitude length\n sgLon, sgLat = dim.find_avg_dim(sst_train, start_axis=0) \n logger.info(f\"+++++ Using average dimensions ({sgLon}, {sgLat}) for non-eddy\")\n\n loRange, laRange = range(0, nLon, sgLon), range(0, nLat, sgLat)\n \n # Create OW array of compatible dimensions for comparing masks\n OW_noeddy = OW[:loRange[-1],:laRange[-1]]\n OW_noeddy = create_subgrids( np.ma.masked_where(OW_noeddy < -0.8, OW_noeddy), sgLon, sgLat, 1 )\n\n # Get a 2d grid of indeces -> make it moldable to the average grid -> convert to subgrids\n idx_subgrids = create_subgrids( np.array( index_list(nLon, nLat) )[:loRange[-1],:laRange[-1]], sgLon, sgLat, 2 )\n\n noneddy_idx_subgrids = []\n for i, grid in enumerate(OW_noeddy):\n if not np.ma.is_masked(grid):\n noneddy_idx_subgrids.append(idx_subgrids[i])\n\n nNoneddies = len(noneddy_idx_subgrids)\n data_noeddy = np.array([[np.zeros((sgLon,sgLat)) for _ in range(nNoneddies)] for _ in range(6)])\n \n # Shuffle the noneddies and loop thorugh untill we have chosen the same amount of non-eddies as eddies\n random.shuffle(noneddy_idx_subgrids)\n added = 0\n for grid_id, idx_grid in enumerate(noneddy_idx_subgrids):\n OW_ = np.zeros((idx_grid.shape[:2]))\n for i in range(len(idx_grid)):\n for j in range(len(idx_grid[0])):\n idx = idx_grid[i,j][0], idx_grid[i,j][1]\n for k in range(len(data_noeddy)):\n data_noeddy[k,grid_id,i,j] = datasets[k][idx]\n #print(idx_grid)\n lo, la = lon[idx_grid[:,0,0]], lat[idx_grid[0,:,1]]\n title = dateStr + \"_noneddy\"\n add = plot_grids(data_noeddy[:,grid_id,:,:], lo, la, None, title)\n if add=='Yes':\n added = added + 1\n sst_train.append([data_noeddy[0,grid_id,:,:], 0]) # [data, label]\n ssl_train.append([data_noeddy[0,grid_id,:,:], 0]) \n uvel_train.append([data_noeddy[0,grid_id,:,:], 0]) \n vvel_train.append([data_noeddy[0,grid_id,:,:], 0]) \n phase_train.append([data_noeddy[0,grid_id,:,:], 0])\n logger.info(f\"+++++ Saving noneddy\") \n if added >= savedImgCounter:\n break\n\n # =========================================================\n # ============== Interpolate ==============\n # =========================================================\n\n #sst_out = np.array(sst_train)\n #ssl_out = np.array(ssl_train)\n #uvel_out = np.array(uvel_train)\n #vvel_out = np.array(vvel_train)\n #phase_out = np.array(phase_train)\n #nTeddies = sst_out.shape[0]\n\n\n logger.info(f\"Compressing and storing training data so far\")\n\n\n # =========================================================\n # ========== Save data as compressed numpy array ==========\n # =========================================================\n\n save_npz_array( (sst_train, 
ssl_train, uvel_train, vvel_train, phase_train) )\n\n\ndef adjustment_data():\n ''' Method to run the ML model to provide correctional non-eddy images for the model '''\n\n ncpath = 'C:/Master/data/cmems_data/global_10km/2018/phys_noland_2018_001.nc'\n\n (ds,t,lon,lat,depth,uvel_full,vvel_full,sst_full,ssl_full) = load_netcdf4(ncpath)\n\n ssl_probLim = 0.95\n phase_probLim = 0.35\n stepSize = 8\n scaler = MinMaxScaler(feature_range=(-1,1))\n\n clf = load_model('models/new/cnn_mult_full.h5')\n\n winW, winH = int(14), int(8)\n dSize = (winW, winH)\n\n # Lists that will hold the training data\n sst_train = []\n ssl_train = []\n uvel_train = []\n vvel_train = []\n phase_train = []\n nDataset = 5\n\n # Shuffle the time so that the expert won't see the same long-lasting eddies\n for i, day in enumerate(random.sample(range(0, len(t)), len(t))): \n\n ssl = np.array(ssl_full[day].T, dtype='float32') \n sst = np.array(sst_full[day].T, dtype='float32') \n uvel = np.array(uvel_full[day,0].T, dtype='float32') \n vvel = np.array(vvel_full[day,0].T, dtype='float32') \n with np.errstate(all='ignore'): # Disable zero div warning\n phase = xr.ufuncs.rad2deg( xr.ufuncs.arctan2(vvel, uvel) ) + 180\n\n shape = ssl.shape\n ssl_scaled = scaler.fit_transform(ssl)\n uvel_scaled = scaler.fit_transform(uvel)\n vvel_scaled = scaler.fit_transform(vvel)\n phase_scaled = scaler.fit_transform(phase)\n\n # loop over the sliding window of indeces\n for x, y, (lonIdxs, latIdxs) in sliding_window(ssl, stepSize=stepSize, windowSize=dSize):\n\n if lonIdxs[-1] >= shape[0] or latIdxs[-1] >= shape[1]:\n continue\n dSize = (winH, winW)\n # Window indexed data and resizing from a smaller window to model size\n sst_wind = np.array([[sst[i,j] for j in latIdxs] for i in lonIdxs])\n ssl_wind = np.array([[ssl[i,j] for j in latIdxs] for i in lonIdxs])\n ssl_scaled_wind = np.array([[ssl_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n phase_wind = np.array([[phase[i,j] for j in latIdxs] for i in lonIdxs])\n phase_scaled_wind = np.array([[phase_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n uvel_wind = np.array([[uvel[i,j] for j in latIdxs] for i in lonIdxs])\n uvel_scaled_wind = np.array([[uvel_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n vvel_wind = np.array([[vvel[i,j] for j in latIdxs] for i in lonIdxs])\n vvel_scaled_wind = np.array([[vvel_scaled[i,j] for j in latIdxs] for i in lonIdxs])\n\n #channels = [ssl_scaled_wind, uvel_scaled_wind, vvel_scaled_wind, phase_scaled_wind]\n channels = [uvel_scaled_wind, vvel_scaled_wind]\n nChannels = len(channels)\n X_cnn = np.zeros((winW,winH,nChannels))\n for lo in range(winW): # Row\n for la in range(winH): # Column\n #X_cnn[i,lo,la,0] = X[0][i][lo][la]\n for c in range(nChannels): # Channels\n X_cnn[lo,la,c] = channels[c][lo][la]\n\n X_cnn = np.expand_dims(X_cnn, 0)\n\n lo, la = lon[lonIdxs], lat[latIdxs]\n\n # Predict and receive probability\n prob = clf.predict(X_cnn)\n\n # By default we say we have a non-eddy (cyclone flag)\n cyclone_f = 0\n # If second column is larger than the boundary, we have a anti-cyclone\n if prob[0,1] > ssl_probLim: \n print('anti-cyclone | prob: {} | lon: [{}, {}] | lat: [{}, {}]'.format(prob[0,1]*100,lo[0],lo[-1],la[0],la[-1]))\n cyclone_f = -1\n # If third column is larger, we have a cyclone\n elif prob[0,2] > ssl_probLim:\n print('cyclone | prob: {} | lon: [{}, {}, lat: [{}, {}]'.format(prob[0,2]*100,lo[0],lo[-1],la[0],la[-1])) \n cyclone_f = 1\n\n eddy_data = [sst_wind, ssl_wind, uvel_wind, vvel_wind, phase_wind]\n \n # Plot and flag if the 
prediction is correct or not\n            yes_no = plot_grids(eddy_data, lo, la, None, check_cyclone(cyclone_f))\n            # Add to training data if the expert labels it correct\n            if yes_no == 'Yes':\n                sst_train.append([sst_wind, cyclone_f]) # [data, label]\n                ssl_train.append([ssl_wind, cyclone_f])\n                uvel_train.append([uvel_wind, cyclone_f])\n                vvel_train.append([vvel_wind, cyclone_f])\n                phase_train.append([phase_wind, cyclone_f])\n            # If not, change the label to non-eddy\n            elif yes_no == 'No':\n                sst_train.append([sst_wind, 0])\n                ssl_train.append([ssl_wind, 0])\n                uvel_train.append([uvel_wind, 0])\n                vvel_train.append([vvel_wind, 0])\n                phase_train.append([phase_wind, 0])\n\n        # Every 10 samples, add to the compressed array\n        if i%10==0:\n            # ADD TO THE COMPRESSED NUMPY ARRAY\n            savedir = 'C:/Master/TTK-4900-Master/data/adjustment_data/'\n            ds = [sst_train, ssl_train, uvel_train, vvel_train, phase_train]\n            save_npz_array(ds, savedir)\n\nif __name__ == '__main__':\n    #semi_automatic_training()\n    adjustment_data()\n"
] | [
[
"matplotlib.colors.BoundaryNorm",
"numpy.expand_dims",
"numpy.sqrt",
"matplotlib.pyplot.get_cmap",
"numpy.max",
"numpy.ma.masked_where",
"numpy.ma.is_masked",
"numpy.where",
"sklearn.preprocessing.MinMaxScaler",
"numpy.arange",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.zeros",
"numpy.append",
"numpy.errstate",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.savez_compressed",
"matplotlib.ticker.MaxNLocator"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
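The save_npz_array helper in the file above grows each .npz archive by loading the stored array, appending the new samples along axis 0, and re-saving the whole file. A minimal, self-contained sketch of that load-append-save pattern (the function name, file name, and shapes here are hypothetical; the single array is assumed to live under numpy's default key 'arr_0'):

    import os
    import numpy as np

    def append_npz(path, new_rows):
        # Append new_rows along axis 0 to the single array stored at path.
        new_rows = np.asarray(new_rows)
        if os.path.exists(path):
            with np.load(path, allow_pickle=True) as data:
                new_rows = np.append(data['arr_0'], new_rows, axis=0)
        np.savez_compressed(path, new_rows)  # rewrites the entire archive

    # Hypothetical usage: accumulate two batches of 8x8 grids into one archive.
    append_npz('demo_train.npz', np.zeros((2, 8, 8)))
    append_npz('demo_train.npz', np.ones((3, 8, 8)))
    with np.load('demo_train.npz') as data:
        assert data['arr_0'].shape == (5, 8, 8)

Note the cost implied by this design: every call decompresses and rewrites the full archive, so batching the appends (as adjustment_data does every 10 samples) keeps the overhead bounded.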
Nivolves2000/hospital-crm | [
"16ed4448e7aa720c102f6fcd56815df4e491aad1"
] | [
"backend/SystemBack1/FindLiverClass.py"
] | [
"import os\r\nimport json\r\nimport numpy as np\r\nimport pandas as pd\r\nimport FeaturesStack as FS\r\n\r\n\r\ndef calculate_gmdh_model(img_f):\r\n if task_type == \"1\":\r\n if sensor_type == \"convex\":\r\n prob = (\r\n -0.946477\r\n + img_f[\"std_vert\"] * np.cbrt(img_f[\"P95(1)_vert\"]) * 0.0171222\r\n + np.power(img_f[\"balx2_hor\"], 3)\r\n * np.sin(img_f[\"dif12_hor\"])\r\n * (-1.583e-05)\r\n + img_f[\"P5_vert\"] * np.cos(img_f[\"pair6664_vert\"]) * (-0.007739)\r\n + np.cbrt(img_f[\"x2_vert\"]) * np.cbrt(img_f[\"balx2_vert\"]) * 0.0831053\r\n + np.cos(img_f[\"pair3947_hor\"]) * np.cos(img_f[\"dif12_vert\"]) * 0.413282\r\n + np.cos(img_f[\"pair4639_hor\"])\r\n * np.cos(img_f[\"pair6967_vert\"])\r\n * (-0.141326)\r\n + np.cbrt(img_f[\"maxfreq_hor\"])\r\n * np.cbrt(img_f[\"mean(1)_vert\"])\r\n * 0.396514\r\n + np.cos(img_f[\"pair4639_hor\"])\r\n * np.arctan(img_f[\"pair5555_vert\"])\r\n * 0.123721\r\n + np.sqrt(img_f[\"pair5045_hor\"])\r\n * np.cos(img_f[\"pair4846_hor\"])\r\n * (-0.110306)\r\n + np.sqrt(img_f[\"maxfreq_orig\"])\r\n * np.power(img_f[\"balx2_hor\"], 3)\r\n * 1.51139e-05\r\n + img_f[\"dif13_vert\"] * np.cbrt(img_f[\"x2_orig\"]) * 0.0276597\r\n )\r\n elif sensor_type == \"linear\":\r\n prob = (\r\n 0.521463\r\n + np.cos(img_f[\"fractal_dim\"])\r\n * np.arctan(img_f[\"pair1526_hor\"])\r\n * (-0.510109)\r\n + np.cbrt(img_f[\"x2_orig\"]) * np.arctan(img_f[\"std(3)_hor\"]) * 0.320271\r\n + np.sin(img_f[\"Q1_vert\"]) * np.cos(img_f[\"skew(2)_vert\"]) * 0.347042\r\n + np.cbrt(img_f[\"median(2)_hor\"]) * np.cos(img_f[\"Q3_vert\"]) * 0.120014\r\n + np.sin(img_f[\"x1_orig\"]) * np.sin(img_f[\"pair5050_vert\"]) * 0.149371\r\n + np.power(img_f[\"kurt(1)_hor\"], 2)\r\n * np.cos(img_f[\"pair2820_hor\"])\r\n * 0.107874\r\n + np.power(img_f[\"pair4845_vert\"], 3)\r\n * np.cos(img_f[\"mean(3)_vert\"])\r\n * 1.95106e-05\r\n + np.cos(img_f[\"mean(3)_vert\"])\r\n * np.arctan(img_f[\"mean(2)_hor\"])\r\n * (-0.115669)\r\n )\r\n\r\n elif sensor_type == \"reinforced_linear\":\r\n prob = (\r\n 0.564665\r\n + np.cbrt(img_f[\"pair2420_hor\"])\r\n * np.arctan(img_f[\"P5(1)_hor\"])\r\n * (-0.185308)\r\n + np.sin(img_f[\"std_hor\"]) * np.sin(img_f[\"pair5359_vert\"]) * 0.529036\r\n + np.cos(img_f[\"range_vert\"])\r\n * np.cos(img_f[\"pair7878_vert\"])\r\n * (-0.326662)\r\n + np.sin(img_f[\"pair6574_vert\"])\r\n * np.cos(img_f[\"Q3(1)_hor\"])\r\n * (-0.337944)\r\n + np.cos(img_f[\"IQR_vert\"])\r\n * np.cos(img_f[\"median(2)_vert\"])\r\n * (-0.237002)\r\n + np.sin(img_f[\"pair5359_vert\"])\r\n * np.cos(img_f[\"median(2)_vert\"])\r\n * (-0.118517)\r\n + np.cos(img_f[\"median(2)_vert\"])\r\n * np.arctan(img_f[\"P5(1)_hor\"])\r\n * 0.138423\r\n + np.cos(img_f[\"pair6574_vert\"])\r\n * np.arctan(img_f[\"pair5649_vert\"])\r\n * 0.051217\r\n + np.sin(img_f[\"pair5359_vert\"])\r\n * np.arctan(img_f[\"x2_vert\"])\r\n * 0.296591\r\n + img_f[\"dif23_vert\"] * np.cos(img_f[\"dif23_vert\"]) * 0.914249\r\n )\r\n else:\r\n prob = 0\r\n elif task_type == \"2\":\r\n prob = 0\r\n else:\r\n prob = 0\r\n return prob, 1 if prob < 0.5 else 2\r\n\r\n\r\ndef forest_prediction(img_f):\r\n if task_type == \"1\":\r\n with open(os.path.join(cur_dir, \"SystemBack/SelfOrganizationForests/\" + sensor_type + \".json\")) as f:\r\n forest = json.load(f)\r\n ypl = [] # y_pred list\r\n for obj in forest:\r\n tree = pd.DataFrame(obj[\"tree\"])\r\n leaf = 1\r\n index = 0\r\n flag = False\r\n y_pred = 0\r\n while not flag:\r\n node = tree.loc[index]\r\n if node[\"side\"] == 1:\r\n if img_f[node[\"feature\"]] < node[\"threshold\"]:\r\n 
y_pred = 1\r\n else:\r\n y_pred = 2\r\n else:\r\n if img_f[node[\"feature\"]] < node[\"threshold\"]:\r\n y_pred = 2\r\n else:\r\n y_pred = 1\r\n try:\r\n index = np.where(\r\n (tree[\"previous_leaf\"] == leaf)\r\n & (tree[\"previous_direction\"] == y_pred)\r\n )[0][0]\r\n leaf = tree.loc[index][\"leaf_number\"]\r\n except:\r\n flag = True\r\n ypl.append(y_pred)\r\n ypl = np.asarray(ypl)\r\n ypl_sum = np.sum(ypl == 1) + np.sum(ypl == 2)\r\n if np.sum(ypl == 1) > np.sum(ypl == 2):\r\n y_pred = 1\r\n forest_prob = (np.sum(ypl == 1) / ypl_sum) * 100\r\n else:\r\n y_pred = 2\r\n forest_prob = (np.sum(ypl == 2) / ypl_sum) * 100\r\n elif task_type == \"2\":\r\n forest_prob = 0\r\n y_pred = 0\r\n else:\r\n forest_prob = 0\r\n y_pred = 0\r\n return forest_prob, y_pred\r\n\r\n\r\ndef get_mean_signs(img_f):\r\n if task_type == \"1\":\r\n if sensor_type == \"convex\":\r\n feature1, feature2, feature3 = (\r\n \"cbrt(P95(1)_vert)\",\r\n \"cos(dif12_vert)\",\r\n \"std_vert\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 5.0132979349645845,\r\n 0.6306169224667781,\r\n 7.127663290343068,\r\n )\r\n value1, value2, value3 = (\r\n np.cbrt(img_f[\"P95(1)_vert\"]),\r\n np.cos(img_f[\"dif12_vert\"]),\r\n img_f[\"std_vert\"],\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень в норме\"\r\n else:\r\n res3 = \"Печень не в норме\"\r\n elif sensor_type == \"linear\":\r\n feature1, feature2, feature3 = (\r\n \"cbrt(x2_orig)\",\r\n \"arctan(pair1526_hor)\",\r\n \"cos(fractal_dim)\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 0.6440777961495892,\r\n 1.3522438545232742,\r\n 0.41596845937104104,\r\n )\r\n value1, value2, value3 = (\r\n np.cbrt(img_f[\"x2_orig\"]),\r\n np.arctan(img_f[\"pair1526_hor\"]),\r\n np.cos(img_f[\"fractal_dim\"]),\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень не в норме\"\r\n else:\r\n res3 = \"Печень в норме\"\r\n elif sensor_type == \"reinforced_linear\":\r\n feature1, feature2, feature3 = (\r\n \"cos(range_vert)\",\r\n \"cbrt(pair2420_hor)\",\r\n \"sin(pair5359_vert)\",\r\n )\r\n threshold1, threshold2, threshold3 = (\r\n 0.9998433086476912,\r\n 1.6407957194770635,\r\n -0.5549728719823037,\r\n )\r\n value1, value2, value3 = (\r\n np.cos(img_f[\"range_vert\"]),\r\n np.cbrt(img_f[\"pair2420_hor\"]),\r\n np.sin(img_f[\"pair5359_vert\"]),\r\n )\r\n if value1 < threshold1:\r\n res1 = \"Печень в норме\"\r\n else:\r\n res1 = \"Печень не в норме\"\r\n if value2 < threshold2:\r\n res2 = \"Печень не в норме\"\r\n else:\r\n res2 = \"Печень в норме\"\r\n if value3 < threshold3:\r\n res3 = \"Печень не в норме\"\r\n else:\r\n res3 = \"Печень в норме\"\r\n else:\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, res3 = 0, 0, 0\r\n elif task_type == \"2\":\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, res3 = 0, 0, 0\r\n else:\r\n feature1, feature2, feature3 = \"\", \"\", \"\"\r\n threshold1, threshold2, threshold3 = 0, 0, 0\r\n value1, value2, value3 = 0, 0, 0\r\n res1, res2, 
res3 = 0, 0, 0\r\n return [\r\n {\"feature\": feature1, \"threshold\": threshold1, \"value\": value1, \"result\": res1},\r\n {\"feature\": feature2, \"threshold\": threshold2, \"value\": value2, \"result\": res2},\r\n {\"feature\": feature3, \"threshold\": threshold3, \"value\": value3, \"result\": res3},\r\n ]\r\n\r\n\r\ndef get_all_features():\r\n with open(os.path.join(cur_dir, \"SystemBack/Features/\", filename)) as f:\r\n feature_names = json.load(f)[\"features\"]\r\n with open(os.path.join(cur_dir, \"SystemBack/BestGrad/\", filename)) as f:\r\n best_grad = json.load(f)\r\n with open(os.path.join(cur_dir, \"SystemBack/MaxFeatures/\", filename)) as f:\r\n best_pairs = json.load(f)\r\n\r\n img_f = []\r\n\r\n # fractal dimension of image\r\n img_f.append(FS.mink_val(path))\r\n\r\n # initial matrix\r\n init_matrix = np.concatenate(FS.get_greyscale_matrix(path), axis=None)\r\n img_f.append((np.sum(init_matrix == np.amin(init_matrix)) / init_matrix.size) * 100)\r\n img_f.append((np.sum(init_matrix == np.amax(init_matrix)) / init_matrix.size) * 100)\r\n\r\n # glcm\r\n glcm = FS.get_glcm(init_matrix)\r\n img_f = FS.get_x1x2x3(\r\n glcm, img_f, best_grad[\"initstandard\"], best_grad[\"initbalanced\"]\r\n )\r\n\r\n # horizontal differential matrix\r\n img_f, diff_matrix = FS.get_norm_features(\r\n FS.get_greyscale_matrix(path),\r\n img_f,\r\n \"hor\",\r\n best_grad[\"horstandard\"],\r\n best_grad[\"horbalanced\"],\r\n best_pairs[\"hor\"],\r\n flag=True,\r\n )\r\n\r\n # vertical differential matrix\r\n img_f = FS.get_norm_features(\r\n FS.get_greyscale_matrix(path),\r\n img_f,\r\n \"vert\",\r\n best_grad[\"vertstandard\"],\r\n best_grad[\"vertbalanced\"],\r\n best_pairs[\"vert\"],\r\n )\r\n return pd.DataFrame([img_f], columns=feature_names).iloc[0], diff_matrix\r\n\r\n\r\ndef get_classification_results(parameters):\r\n # task_type: 1 - норма/патология, 2 - стадия фиброза\r\n global sensor_type, path, task_type, cur_dir, filename\r\n sensor_type, path, task_type = (\r\n parameters[\"sensor_type\"],\r\n parameters[\"path\"],\r\n parameters[\"task_type\"],\r\n )\r\n cur_dir, filename = parameters[\"cur_dir\"], parameters[\"filename\"]\r\n\r\n (\r\n img_f,\r\n diff_matrix,\r\n ) = get_all_features() # img_f - image features (признаки изображения)\r\n\r\n # МГУА\r\n gmdh_prob, gmdh_liver_class = calculate_gmdh_model(img_f)\r\n if gmdh_prob > 1 or gmdh_prob < 0:\r\n gmdh_prob = 100\r\n elif gmdh_liver_class == 2:\r\n gmdh_prob = round(gmdh_prob * 100, 1)\r\n elif gmdh_liver_class == 1:\r\n gmdh_prob = round((1 - gmdh_prob) * 100, 1)\r\n gmdh_result = \"Печень в норме\" if gmdh_liver_class == 1 else \"Печень не в норме\"\r\n\r\n # Лес самоорганизации\r\n forest_prob, forest_liver_class = forest_prediction(img_f)\r\n forest_result = \"Печень в норме\" if forest_liver_class == 1 else \"Печень не в норме\"\r\n\r\n # Пороги трёх наилучших признаков\r\n mean_signs = get_mean_signs(img_f)\r\n\r\n return (\r\n {\r\n \"gmdh_result\": gmdh_result,\r\n \"gmdh_probability\": gmdh_prob,\r\n \"forest_result\": forest_result,\r\n \"forest_probability\": forest_prob,\r\n \"mean_signs\": mean_signs,\r\n },\r\n diff_matrix,\r\n )\r\n\r\n"
] | [
[
"numpy.amax",
"numpy.sqrt",
"numpy.arctan",
"numpy.power",
"numpy.asarray",
"numpy.amin",
"numpy.cos",
"pandas.DataFrame",
"numpy.sin",
"numpy.cbrt",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
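forest_prediction in the file above walks each JSON-encoded tree to a leaf and then takes a majority vote over the per-tree labels (1 = norm, 2 = pathology), reporting the winning fraction as a percentage. A small sketch of just the voting step (the function name and the 7-tree example are hypothetical; the original resolves a tie in favour of class 2, while this version lets a tie fall to class 1):

    import numpy as np

    def majority_vote(tree_predictions):
        # Count votes for each class and pick the most common one.
        ypl = np.asarray(tree_predictions)
        votes = {cls: int(np.sum(ypl == cls)) for cls in (1, 2)}
        y_pred = max(votes, key=votes.get)
        prob = votes[y_pred] / (votes[1] + votes[2]) * 100
        return y_pred, prob

    # Hypothetical usage with 7 trees: 5 of 7 vote "norm".
    label, confidence = majority_vote([1, 1, 2, 1, 2, 1, 1])
    assert label == 1 and round(confidence, 1) == 71.4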
RayshineRen/Introduction_to_Data_Science_in_Python | [
"b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71"
] | [
"week2/week2.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 14 19:28:11 2020\r\n\r\n@author: Ray\r\n@email: [email protected]\r\n@wechat: RayTing0305\r\n\"\"\"\r\n\r\n###chapter5\r\n\r\nimport pandas as pd\r\nfrom pandas import Series, DataFrame\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nnp.random.seed(12345)\r\nplt.rc('figure', figsize=(10, 6))\r\nPREVIOUS_MAX_ROWS = pd.options.display.max_rows\r\npd.options.display.max_rows = 20\r\nnp.set_printoptions(precision=4, suppress=True)\r\n\r\n\r\n### Series\r\n\r\nobj = pd.Series([4, 7, -5, 3])\r\nobj_array = obj.values\r\nobj_range = obj.index\r\n\r\nobj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])\r\nobj2_array = obj2.values\r\nobj2_range = obj2.index\r\n\r\nobj3 = obj2[['a','c','d']]\r\nobj3_array = obj3.values\r\nobj3_range = obj3.index\r\n\r\nobj4 = obj2[obj2>0]\r\nobj5 = obj2*2\r\nobj6 = np.exp(obj2)\r\n\r\n#print('b' in obj2)\r\n#print('e' in obj2)\r\n\r\n\r\nsdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}\r\nobj7 = pd.Series(sdata)\r\n\r\nstates = ['California', 'Ohio', 'Oregon', 'Texas']\r\nobj8 = pd.Series(sdata, index=states)\r\n\r\n#print(pd.isnull(obj8))\r\n#print(pd.notnull(obj8))\r\n\r\nobj9 = obj7 + obj8\r\n\r\nobj8.name = 'population'\r\nobj8.index.name = 'state'\r\n\r\n\r\n\r\n####DataFrame\r\n\r\ndata = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],\r\n 'year': [2000, 2001, 2002, 2001, 2002, 2003],\r\n 'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}\r\nframe = pd.DataFrame(data)\r\nprint(frame.state)\r\n#print(frame.head())\r\n#print(frame.columns)\r\n\r\nframe = pd.DataFrame(data, columns=['year', 'state', 'pop'])\r\n\r\nframe2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],\r\n index=['one', 'two', 'three', 'four',\r\n 'five', 'six'])\r\nfc1 = frame2['state']\r\nfc2 = frame2.state\r\n#print(fc1==fc2)\r\n#print(id(fc1)==id(fc2))\r\n\r\nfr1 = frame2.loc['two']\r\n#print(fr1)\r\n\r\nframe2['debt'] = np.arange(6.)\r\n#print(frame2)\r\n\r\nval = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\r\nframe2['debt'] = val\r\n#print(frame2)\r\n\r\nframe2['eastern'] = frame2.state == 'Ohio'\r\n\r\ndel frame2['eastern']\r\n\r\npop = {'Nevada': {2001: 2.4, 2002: 2.9},\r\n 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}\r\nframe3 = pd.DataFrame(pop)\r\n\r\n#print(frame3.T)\r\n\r\nframe4 = pd.DataFrame(pop, index=[2001, 2002, 2003])\r\n\r\npdata = {'Ohio': frame3['Ohio'][:-1],\r\n 'Nevada': frame3['Nevada'][:2]}\r\nframe5 = pd.DataFrame(pdata)\r\n\r\nframe3.index.name='year'\r\nframe3.columns.name = 'state'\r\n#print(frame3.values)\r\n\r\n### Index Objects\r\nobj = pd.Series(range(3), index=['a', 'b', 'c'])\r\nindex = obj.index\r\n\r\n##index[1] = 'd' # TypeError\r\n\r\nlabels = pd.Index(np.arange(3))\r\ndup_labels = pd.Index(['foo', 'foo', 'bar', 'bar'])\r\nframe6 = pd.Series(np.arange(4), index = dup_labels)\r\n#print(frame6['foo'])\r\n\r\n\r\n### Essential Functionality\r\n\r\nobj = pd.Series([4.5, 7.2, -5.3, 3.6], index=['d', 'b', 'a', 'c'])\r\nobj2 = obj.reindex(['a', 'b', 'c', 'd', 'e'])\r\nobj3 = pd.Series(['blue', 'purple', 'yellow'], index=[0, 2, 4])\r\nobj4 = obj3.reindex(range(6), method='ffill')\r\n\r\nframe = pd.DataFrame(np.arange(9).reshape((3, 3)),\r\n index=['a', 'c', 'd'],\r\n columns=['Ohio', 'Texas', 'California'])\r\nframe2 = frame.reindex(['a', 'b', 'c', 'd'])\r\n\r\nstates = ['Texas', 'Utah', 'California']\r\nframe3 = frame.reindex(columns=states)\r\n\r\n#fr = frame.loc[['a', 'c'], states]\r\n\r\n\r\n## Dropping Entries from an 
Axis\r\nobj = pd.Series(np.arange(5.), index=['a', 'b', 'c', 'd', 'e'])\r\nnew_obj = obj.drop(['c', 'd'])\r\n\r\n\r\nobj = pd.Series(np.arange(4.), index=['a', 'b', 'c', 'd'])\r\nobj2 = obj[['b', 'a', 'd']]\r\nobj3 = obj[[1, 3]]\r\nobj4 = obj[obj<2]\r\nobj5 = obj['b':'e']\r\nobj['b':'c'] = 5\r\n\r\ndata = pd.DataFrame(np.arange(16).reshape((4, 4)),\r\n index=['Ohio', 'Colorado', 'Utah', 'New York'],\r\n columns=['one', 'two', 'three', 'four'])\r\n#print(data)\r\n#print(data[:2])\r\n#print(data[data['three']>5])\r\n#data[data<5]=0\r\n#print(data)\r\n\r\nloc = data.loc['Colorado', ['two', 'three']]\r\n\r\nloc2 = data.iloc[2, [3, 0, 1]]\r\n#print(loc2)\r\nloc3 = data.iloc[2]\r\nloc4 = data.iloc[[1, 2], [3, 0, 1]]\r\n#print(loc4)\r\nloc5 = data.iloc[:, :3][data.three > 5]\r\n#print(loc5)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.Series",
"numpy.random.seed",
"numpy.arange",
"numpy.set_printoptions",
"matplotlib.pyplot.rc",
"pandas.Index",
"pandas.DataFrame",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
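week2.py above exercises the core pandas indexing machinery. Two of its points condensed into a runnable sketch (the frame mirrors the one built near the end of that file; the assertions are illustrative, not from the original):

    import numpy as np
    import pandas as pd

    data = pd.DataFrame(np.arange(16).reshape((4, 4)),
                        index=['Ohio', 'Colorado', 'Utah', 'New York'],
                        columns=['one', 'two', 'three', 'four'])

    # .loc selects by label, .iloc by integer position; both hit the same cell.
    assert data.loc['Utah', 'two'] == data.iloc[2, 1] == 9

    # reindex with method='ffill' propagates the last valid value forward.
    obj = pd.Series(['blue', 'purple'], index=[0, 2])
    assert list(obj.reindex(range(4), method='ffill')) == ['blue', 'blue', 'purple', 'purple']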
joeranbosma/nnDetection | [
"2ebbf1cdc8a8794c73e325f06fea50632c78ae8c",
"2ebbf1cdc8a8794c73e325f06fea50632c78ae8c",
"2ebbf1cdc8a8794c73e325f06fea50632c78ae8c",
"2ebbf1cdc8a8794c73e325f06fea50632c78ae8c"
] | [
"nndet/ptmodule/retinaunet/base.py",
"nndet/io/itk.py",
"nndet/losses/classification.py",
"nndet/evaluator/detection/hist.py"
] | [
"\"\"\"\nCopyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport copy\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom functools import partial\nfrom typing import Callable, Hashable, Sequence, Dict, Any, Type\n\nimport torch\nimport numpy as np\nfrom loguru import logger\nfrom torchvision.models.detection.rpn import AnchorGenerator\n\nfrom nndet.utils.tensor import to_numpy\nfrom nndet.evaluator.det import BoxEvaluator\nfrom nndet.evaluator.seg import SegmentationEvaluator\n\nfrom nndet.core.retina import BaseRetinaNet\nfrom nndet.core.boxes.matcher import IoUMatcher\nfrom nndet.core.boxes.sampler import HardNegativeSamplerBatched\nfrom nndet.core.boxes.coder import CoderType, BoxCoderND\nfrom nndet.core.boxes.anchors import get_anchor_generator\nfrom nndet.core.boxes.ops import box_iou\nfrom nndet.core.boxes.anchors import AnchorGeneratorType\n\nfrom nndet.ptmodule.base_module import LightningBaseModuleSWA, LightningBaseModule\n\nfrom nndet.arch.conv import Generator, ConvInstanceRelu, ConvGroupRelu\nfrom nndet.arch.blocks.basic import StackedConvBlock2\nfrom nndet.arch.encoder.abstract import EncoderType\nfrom nndet.arch.encoder.modular import Encoder\nfrom nndet.arch.decoder.base import DecoderType, BaseUFPN, UFPNModular\nfrom nndet.arch.heads.classifier import ClassifierType, CEClassifier\nfrom nndet.arch.heads.regressor import RegressorType, L1Regressor\nfrom nndet.arch.heads.comb import HeadType, DetectionHeadHNM\nfrom nndet.arch.heads.segmenter import SegmenterType, DiCESegmenter\n\nfrom nndet.training.optimizer import get_params_no_wd_on_norm\nfrom nndet.training.learning_rate import LinearWarmupPolyLR\n\nfrom nndet.inference.predictor import Predictor\nfrom nndet.inference.sweeper import BoxSweeper\nfrom nndet.inference.transforms import get_tta_transforms, Inference2D\nfrom nndet.inference.loading import get_loader_fn\nfrom nndet.inference.helper import predict_dir\nfrom nndet.inference.ensembler.segmentation import SegmentationEnsembler\nfrom nndet.inference.ensembler.detection import BoxEnsemblerSelective\n\nfrom nndet.io.transforms import (\n Compose,\n Instances2Boxes,\n Instances2Segmentation,\n FindInstances,\n )\n\n\nclass RetinaUNetModule(LightningBaseModuleSWA):\n base_conv_cls = ConvInstanceRelu\n head_conv_cls = ConvGroupRelu\n block = StackedConvBlock2\n encoder_cls = Encoder\n decoder_cls = UFPNModular\n matcher_cls = IoUMatcher\n head_cls = DetectionHeadHNM\n head_classifier_cls = CEClassifier\n head_regressor_cls = L1Regressor\n head_sampler_cls = HardNegativeSamplerBatched\n segmenter_cls = DiCESegmenter\n\n def __init__(self,\n model_cfg: dict,\n trainer_cfg: dict,\n plan: dict,\n **kwargs\n ):\n \"\"\"\n RetinaUNet Lightning Module Skeleton\n \n Args:\n model_cfg: model configuration. 
Check :method:`from_config_plan`\n for more information\n trainer_cfg: trainer information\n plan: contains parameters which were derived from the planning\n stage\n \"\"\"\n super().__init__(\n model_cfg=model_cfg,\n trainer_cfg=trainer_cfg,\n plan=plan,\n )\n\n _classes = [f\"class{c}\" for c in range(plan[\"architecture\"][\"classifier_classes\"])]\n self.box_evaluator = BoxEvaluator.create(\n classes=_classes,\n fast=True,\n save_dir=None,\n )\n self.seg_evaluator = SegmentationEvaluator.create()\n\n self.pre_trafo = Compose(\n FindInstances(\n instance_key=\"target\",\n save_key=\"present_instances\",\n ),\n Instances2Boxes(\n instance_key=\"target\",\n map_key=\"instance_mapping\",\n box_key=\"boxes\",\n class_key=\"classes\",\n present_instances=\"present_instances\",\n ),\n Instances2Segmentation(\n instance_key=\"target\",\n map_key=\"instance_mapping\",\n present_instances=\"present_instances\",\n )\n )\n\n self.eval_score_key = \"mAP_IoU_0.10_0.50_0.05_MaxDet_100\"\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Computes a single training step\n See :class:`BaseRetinaNet` for more information\n \"\"\"\n with torch.no_grad():\n batch = self.pre_trafo(**batch)\n\n losses, _ = self.model.train_step(\n images=batch[\"data\"],\n targets={\n \"target_boxes\": batch[\"boxes\"],\n \"target_classes\": batch[\"classes\"],\n \"target_seg\": batch['target'][:, 0] # Remove channel dimension\n },\n evaluation=False,\n batch_num=batch_idx,\n )\n loss = sum(losses.values())\n return {\"loss\": loss, **{key: l.detach().item() for key, l in losses.items()}}\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n Computes a single validation step (same as train step but with\n additional prediciton processing)\n See :class:`BaseRetinaNet` for more information\n \"\"\"\n with torch.no_grad():\n batch = self.pre_trafo(**batch)\n targets = {\n \"target_boxes\": batch[\"boxes\"],\n \"target_classes\": batch[\"classes\"],\n \"target_seg\": batch['target'][:, 0] # Remove channel dimension\n }\n losses, prediction = self.model.train_step(\n images=batch[\"data\"],\n targets=targets,\n evaluation=True,\n batch_num=batch_idx,\n )\n loss = sum(losses.values())\n\n self.evaluation_step(prediction=prediction, targets=targets)\n return {\"loss\": loss.detach().item(),\n **{key: l.detach().item() for key, l in losses.items()}}\n\n def evaluation_step(\n self,\n prediction: dict,\n targets: dict,\n ):\n \"\"\"\n Perform an evaluation step to add predictions and gt to\n caching mechanism which is evaluated at the end of the epoch\n\n Args:\n prediction: predictions obtained from model\n 'pred_boxes': List[Tensor]: predicted bounding boxes for\n each image List[[R, dim * 2]]\n 'pred_scores': List[Tensor]: predicted probability for\n the class List[[R]]\n 'pred_labels': List[Tensor]: predicted class List[[R]]\n 'pred_seg': Tensor: predicted segmentation [N, dims]\n targets: ground truth\n `target_boxes` (List[Tensor]): ground truth bounding boxes\n (x1, y1, x2, y2, (z1, z2))[X, dim * 2], X= number of ground\n truth boxes in image\n `target_classes` (List[Tensor]): ground truth class per box\n (classes start from 0) [X], X= number of ground truth\n boxes in image\n `target_seg` (Tensor): segmentation ground truth (if seg was\n found in input dict)\n \"\"\"\n pred_boxes = to_numpy(prediction[\"pred_boxes\"])\n pred_classes = to_numpy(prediction[\"pred_labels\"])\n pred_scores = to_numpy(prediction[\"pred_scores\"])\n\n gt_boxes = to_numpy(targets[\"target_boxes\"])\n gt_classes = 
to_numpy(targets[\"target_classes\"])\n gt_ignore = None\n\n self.box_evaluator.run_online_evaluation(\n pred_boxes=pred_boxes,\n pred_classes=pred_classes,\n pred_scores=pred_scores,\n gt_boxes=gt_boxes,\n gt_classes=gt_classes,\n gt_ignore=gt_ignore,\n )\n\n pred_seg = to_numpy(prediction[\"pred_seg\"])\n gt_seg = to_numpy(targets[\"target_seg\"])\n\n self.seg_evaluator.run_online_evaluation(\n seg_probs=pred_seg,\n target=gt_seg,\n )\n\n def training_epoch_end(self, training_step_outputs):\n \"\"\"\n Log train loss to loguru logger\n \"\"\"\n # process and log losses\n vals = defaultdict(list)\n for _val in training_step_outputs:\n for _k, _v in _val.items():\n if _k == \"loss\":\n vals[_k].append(_v.detach().item())\n else:\n vals[_k].append(_v)\n\n for _key, _vals in vals.items():\n mean_val = np.mean(_vals)\n if _key == \"loss\":\n logger.info(f\"Train loss reached: {mean_val:0.5f}\")\n self.log(f\"train_{_key}\", mean_val, sync_dist=True)\n return super().training_epoch_end(training_step_outputs)\n\n def validation_epoch_end(self, validation_step_outputs):\n \"\"\"\n Log val loss to loguru logger\n \"\"\"\n # process and log losses\n vals = defaultdict(list)\n for _val in validation_step_outputs:\n for _k, _v in _val.items():\n vals[_k].append(_v)\n\n for _key, _vals in vals.items():\n mean_val = np.mean(_vals)\n if _key == \"loss\":\n logger.info(f\"Val loss reached: {mean_val:0.5f}\")\n self.log(f\"val_{_key}\", mean_val, sync_dist=True)\n\n # process and log metrics\n self.evaluation_end()\n return super().validation_epoch_end(validation_step_outputs)\n\n def evaluation_end(self):\n \"\"\"\n Uses the cached values from `evaluation_step` to perform the evaluation\n of the epoch\n \"\"\"\n metric_scores, _ = self.box_evaluator.finish_online_evaluation()\n self.box_evaluator.reset()\n\n logger.info(f\"[email protected]:0.5:0.05: {metric_scores['mAP_IoU_0.10_0.50_0.05_MaxDet_100']:0.3f} \"\n f\"[email protected]: {metric_scores['AP_IoU_0.10_MaxDet_100']:0.3f} \"\n f\"[email protected]: {metric_scores['AP_IoU_0.50_MaxDet_100']:0.3f}\")\n\n seg_scores, _ = self.seg_evaluator.finish_online_evaluation()\n self.seg_evaluator.reset()\n metric_scores.update(seg_scores)\n\n logger.info(f\"Proxy FG Dice: {seg_scores['seg_dice']:0.3f}\")\n\n for key, item in metric_scores.items():\n self.log(f'{key}', item, on_step=None, on_epoch=True, prog_bar=False, logger=True)\n\n def configure_optimizers(self):\n \"\"\"\n Configure optimizer and scheduler\n Base configuration is SGD with LinearWarmup and PolyLR learning rate\n schedule\n \"\"\"\n # configure optimizer\n logger.info(f\"Running: initial_lr {self.trainer_cfg['initial_lr']} \"\n f\"weight_decay {self.trainer_cfg['weight_decay']} \"\n f\"SGD with momentum {self.trainer_cfg['sgd_momentum']} and \"\n f\"nesterov {self.trainer_cfg['sgd_nesterov']}\")\n wd_groups = get_params_no_wd_on_norm(self, weight_decay=self.trainer_cfg['weight_decay'])\n optimizer = torch.optim.SGD(\n wd_groups,\n self.trainer_cfg[\"initial_lr\"],\n weight_decay=self.trainer_cfg[\"weight_decay\"],\n momentum=self.trainer_cfg[\"sgd_momentum\"],\n nesterov=self.trainer_cfg[\"sgd_nesterov\"],\n )\n\n # configure lr scheduler\n num_iterations = self.trainer_cfg[\"max_num_epochs\"] * \\\n self.trainer_cfg[\"num_train_batches_per_epoch\"]\n scheduler = LinearWarmupPolyLR(\n optimizer=optimizer,\n warm_iterations=self.trainer_cfg[\"warm_iterations\"],\n warm_lr=self.trainer_cfg[\"warm_lr\"],\n poly_gamma=self.trainer_cfg[\"poly_gamma\"],\n num_iterations=num_iterations\n )\n return 
[optimizer], {'scheduler': scheduler, 'interval': 'step'}\n\n @classmethod\n def from_config_plan(cls,\n model_cfg: dict,\n plan_arch: dict,\n plan_anchors: dict,\n log_num_anchors: str = None,\n **kwargs,\n ):\n \"\"\"\n Create Configurable RetinaUNet\n\n Args:\n model_cfg: model configurations\n See example configs for more info\n plan_arch: plan architecture\n `dim` (int): number of spatial dimensions\n `in_channels` (int): number of input channels\n `classifier_classes` (int): number of classes\n `seg_classes` (int): number of classes\n `start_channels` (int): number of start channels in encoder\n `fpn_channels` (int): number of channels to use for FPN\n `head_channels` (int): number of channels to use for head\n `decoder_levels` (int): decoder levels to user for detection\n plan_anchors: parameters for anchors (see\n :class:`AnchorGenerator` for more info)\n `stride`: stride\n `aspect_ratios`: aspect ratios\n `sizes`: sized for 2d acnhors\n (`zsizes`: additional z sizes for 3d)\n log_num_anchors: name of logger to use; if None, no logging\n will be performed\n **kwargs:\n \"\"\"\n logger.info(f\"Architecture overwrites: {model_cfg['plan_arch_overwrites']} \"\n f\"Anchor overwrites: {model_cfg['plan_anchors_overwrites']}\")\n logger.info(f\"Building architecture according to plan of {plan_arch.get('arch_name', 'not_found')}\")\n plan_arch.update(model_cfg[\"plan_arch_overwrites\"])\n plan_anchors.update(model_cfg[\"plan_anchors_overwrites\"])\n logger.info(f\"Start channels: {plan_arch['start_channels']}; \"\n f\"head channels: {plan_arch['head_channels']}; \"\n f\"fpn channels: {plan_arch['fpn_channels']}\")\n\n _plan_anchors = copy.deepcopy(plan_anchors)\n coder = BoxCoderND(weights=(1.,) * (plan_arch[\"dim\"] * 2))\n s_param = False if (\"aspect_ratios\" in _plan_anchors) and \\\n (_plan_anchors[\"aspect_ratios\"] is not None) else True\n anchor_generator = get_anchor_generator(\n plan_arch[\"dim\"], s_param=s_param)(**_plan_anchors)\n\n encoder = cls._build_encoder(\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n )\n decoder = cls._build_decoder(\n encoder=encoder,\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n )\n matcher = cls.matcher_cls(\n similarity_fn=box_iou,\n **model_cfg[\"matcher_kwargs\"],\n )\n\n classifier = cls._build_head_classifier(\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n anchor_generator=anchor_generator,\n )\n regressor = cls._build_head_regressor(\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n anchor_generator=anchor_generator,\n )\n head = cls._build_head(\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n classifier=classifier,\n regressor=regressor,\n coder=coder\n )\n segmenter = cls._build_segmenter(\n plan_arch=plan_arch,\n model_cfg=model_cfg,\n decoder=decoder,\n )\n\n detections_per_img = plan_arch.get(\"detections_per_img\", 100)\n score_thresh = plan_arch.get(\"score_thresh\", 0)\n topk_candidates = plan_arch.get(\"topk_candidates\", 10000)\n remove_small_boxes = plan_arch.get(\"remove_small_boxes\", 0.01)\n nms_thresh = plan_arch.get(\"nms_thresh\", 0.6)\n\n logger.info(f\"Model Inference Summary: \\n\"\n f\"detections_per_img: {detections_per_img} \\n\"\n f\"score_thresh: {score_thresh} \\n\"\n f\"topk_candidates: {topk_candidates} \\n\"\n f\"remove_small_boxes: {remove_small_boxes} \\n\"\n f\"nms_thresh: {nms_thresh}\",\n )\n\n return BaseRetinaNet(\n dim=plan_arch[\"dim\"],\n encoder=encoder,\n decoder=decoder,\n head=head,\n anchor_generator=anchor_generator,\n matcher=matcher,\n num_classes=plan_arch[\"classifier_classes\"],\n 
decoder_levels=plan_arch[\"decoder_levels\"],\n segmenter=segmenter,\n # model_max_instances_per_batch_element (in mdt per img, per class; here: per img)\n detections_per_img=detections_per_img,\n score_thresh=score_thresh,\n topk_candidates=topk_candidates,\n remove_small_boxes=remove_small_boxes,\n nms_thresh=nms_thresh,\n )\n\n @classmethod\n def _build_encoder(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n ) -> EncoderType:\n \"\"\"\n Build encoder network\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n EncoderType: encoder instance\n \"\"\"\n conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n logger.info(f\"Building:: encoder {cls.encoder_cls.__name__}: {model_cfg['encoder_kwargs']} \")\n encoder = cls.encoder_cls(\n conv=conv,\n conv_kernels=plan_arch[\"conv_kernels\"],\n strides=plan_arch[\"strides\"],\n block_cls=cls.block,\n in_channels=plan_arch[\"in_channels\"],\n start_channels=plan_arch[\"start_channels\"],\n stage_kwargs=None,\n max_channels=plan_arch.get(\"max_channels\", 320),\n **model_cfg['encoder_kwargs'],\n )\n return encoder\n\n @classmethod\n def _build_decoder(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n encoder: EncoderType,\n ) -> DecoderType:\n \"\"\"\n Build decoder network\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n DecoderType: decoder instance\n \"\"\"\n conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n logger.info(f\"Building:: decoder {cls.decoder_cls.__name__}: {model_cfg['decoder_kwargs']}\")\n decoder = cls.decoder_cls(\n conv=conv,\n conv_kernels=plan_arch[\"conv_kernels\"],\n strides=encoder.get_strides(),\n in_channels=encoder.get_channels(),\n decoder_levels=plan_arch[\"decoder_levels\"],\n fixed_out_channels=plan_arch[\"fpn_channels\"],\n **model_cfg['decoder_kwargs'],\n )\n return decoder\n\n @classmethod\n def _build_head_classifier(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n anchor_generator: AnchorGeneratorType,\n ) -> ClassifierType:\n \"\"\"\n Build classification subnetwork for detection head\n\n Args:\n anchor_generator: anchor generator instance\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n\n Returns:\n ClassifierType: classification instance\n \"\"\"\n conv = Generator(cls.head_conv_cls, plan_arch[\"dim\"])\n name = cls.head_classifier_cls.__name__\n kwargs = model_cfg['head_classifier_kwargs']\n\n logger.info(f\"Building:: classifier {name}: {kwargs}\")\n classifier = cls.head_classifier_cls(\n conv=conv,\n in_channels=plan_arch[\"fpn_channels\"],\n internal_channels=plan_arch[\"head_channels\"],\n num_classes=plan_arch[\"classifier_classes\"],\n anchors_per_pos=anchor_generator.num_anchors_per_location()[0],\n num_levels=len(plan_arch[\"decoder_levels\"]),\n **kwargs,\n )\n return classifier\n\n @classmethod\n def _build_head_regressor(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n anchor_generator: AnchorGeneratorType,\n ) -> RegressorType:\n \"\"\"\n Build regression subnetwork for detection head\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n anchor_generator: anchor generator instance\n\n Returns:\n RegressorType: classification instance\n \"\"\"\n conv = Generator(cls.head_conv_cls, plan_arch[\"dim\"])\n name = cls.head_regressor_cls.__name__\n kwargs = model_cfg['head_regressor_kwargs']\n\n logger.info(f\"Building:: regressor {name}: {kwargs}\")\n regressor = cls.head_regressor_cls(\n conv=conv,\n 
in_channels=plan_arch[\"fpn_channels\"],\n internal_channels=plan_arch[\"head_channels\"],\n anchors_per_pos=anchor_generator.num_anchors_per_location()[0],\n num_levels=len(plan_arch[\"decoder_levels\"]),\n **kwargs,\n )\n return regressor\n\n @classmethod\n def _build_head(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n classifier: ClassifierType,\n regressor: RegressorType,\n coder: CoderType,\n ) -> HeadType:\n \"\"\"\n Build detection head\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n classifier: classifier instance\n regressor: regressor instance\n coder: coder instance to encode boxes\n\n Returns:\n HeadType: instantiated head\n \"\"\"\n head_name = cls.head_cls.__name__\n head_kwargs = model_cfg['head_kwargs']\n sampler_name = cls.head_sampler_cls.__name__\n sampler_kwargs = model_cfg['head_sampler_kwargs']\n\n logger.info(f\"Building:: head {head_name}: {head_kwargs} \"\n f\"sampler {sampler_name}: {sampler_kwargs}\")\n sampler = cls.head_sampler_cls(**sampler_kwargs)\n head = cls.head_cls(\n classifier=classifier,\n regressor=regressor,\n coder=coder,\n sampler=sampler,\n log_num_anchors=None,\n **head_kwargs,\n )\n return head\n\n @classmethod\n def _build_segmenter(\n cls,\n plan_arch: dict,\n model_cfg: dict,\n decoder: DecoderType,\n ) -> SegmenterType:\n \"\"\"\n Build segmenter head\n\n Args:\n plan_arch: architecture settings\n model_cfg: additional architecture settings\n decoder: decoder instance\n\n Returns:\n SegmenterType: segmenter head\n \"\"\"\n if cls.segmenter_cls is not None:\n name = cls.segmenter_cls.__name__\n kwargs = model_cfg['segmenter_kwargs']\n conv = Generator(cls.base_conv_cls, plan_arch[\"dim\"])\n\n logger.info(f\"Building:: segmenter {name} {kwargs}\")\n segmenter = cls.segmenter_cls(\n conv,\n seg_classes=plan_arch[\"seg_classes\"],\n in_channels=decoder.get_channels(),\n decoder_levels=plan_arch[\"decoder_levels\"],\n **kwargs,\n )\n else:\n segmenter = None\n return segmenter\n\n @staticmethod\n def get_ensembler_cls(key: Hashable, dim: int) -> Callable:\n \"\"\"\n Get ensembler classes to combine multiple predictions\n Needs to be overwritten in subclasses!\n \"\"\"\n _lookup = {\n 2: {\n \"boxes\": None,\n \"seg\": None,\n },\n 3: {\n \"boxes\": BoxEnsemblerSelective,\n \"seg\": SegmentationEnsembler,\n }\n }\n if dim == 2:\n raise NotImplementedError\n return _lookup[dim][key]\n\n @classmethod\n def get_predictor(cls,\n plan: Dict,\n models: Sequence[RetinaUNetModule],\n num_tta_transforms: int = None,\n do_seg: bool = False,\n **kwargs,\n ) -> Predictor:\n # process plan\n crop_size = plan[\"patch_size\"]\n batch_size = plan[\"batch_size\"]\n inferene_plan = plan.get(\"inference_plan\", {})\n logger.info(f\"Found inference plan: {inferene_plan} for prediction\")\n if num_tta_transforms is None:\n num_tta_transforms = 8 if plan[\"network_dim\"] == 3 else 4\n\n # setup\n tta_transforms, tta_inverse_transforms = \\\n get_tta_transforms(num_tta_transforms, True)\n logger.info(f\"Using {len(tta_transforms)} tta transformations for prediction (one dummy trafo).\")\n\n ensembler = {\"boxes\": partial(\n cls.get_ensembler_cls(key=\"boxes\", dim=plan[\"network_dim\"]).from_case,\n parameters=inferene_plan,\n )}\n if do_seg:\n ensembler[\"seg\"] = partial(\n cls.get_ensembler_cls(key=\"seg\", dim=plan[\"network_dim\"]).from_case,\n )\n\n predictor = Predictor(\n ensembler=ensembler,\n models=models,\n crop_size=crop_size,\n tta_transforms=tta_transforms,\n tta_inverse_transforms=tta_inverse_transforms,\n 
batch_size=batch_size,\n **kwargs,\n )\n if plan[\"network_dim\"] == 2:\n raise NotImplementedError\n predictor.pre_transform = Inference2D([\"data\"])\n return predictor\n\n def sweep(self,\n cfg: dict,\n save_dir: os.PathLike,\n train_data_dir: os.PathLike,\n case_ids: Sequence[str],\n run_prediction: bool = True,\n **kwargs,\n ) -> Dict[str, Any]:\n \"\"\"\n Sweep detection parameters to find the best predictions\n\n Args:\n cfg: config used for training\n save_dir: save dir used for training\n train_data_dir: directory where preprocessed training/validation\n data is located\n case_ids: case identifies to prepare and predict\n run_prediction: predict cases\n **kwargs: keyword arguments passed to predict function\n\n Returns:\n Dict: inference plan\n e.g. (exact params depend on ensembler class usef for prediction)\n `iou_thresh` (float): best IoU threshold\n `score_thresh (float)`: best score threshold\n `no_overlap` (bool): enable/disable class independent NMS (ciNMS)\n \"\"\"\n logger.info(f\"Running parameter sweep on {case_ids}\")\n\n train_data_dir = Path(train_data_dir)\n preprocessed_dir = train_data_dir.parent\n processed_eval_labels = preprocessed_dir / \"labelsTr\"\n\n _save_dir = save_dir / \"sweep\"\n _save_dir.mkdir(parents=True, exist_ok=True)\n\n prediction_dir = save_dir / \"sweep_predictions\"\n prediction_dir.mkdir(parents=True, exist_ok=True)\n\n if run_prediction:\n logger.info(\"Predict cases with default settings...\")\n predictor = predict_dir(\n source_dir=train_data_dir,\n target_dir=prediction_dir,\n cfg=cfg,\n plan=self.plan,\n source_models=save_dir,\n num_models=1,\n num_tta_transforms=None,\n case_ids=case_ids,\n save_state=True,\n model_fn=get_loader_fn(mode=self.trainer_cfg.get(\"sweep_ckpt\", \"last\")),\n **kwargs,\n )\n\n logger.info(\"Start parameter sweep...\")\n ensembler_cls = self.get_ensembler_cls(key=\"boxes\", dim=self.plan[\"network_dim\"])\n sweeper = BoxSweeper(\n classes=[item for _, item in cfg[\"data\"][\"labels\"].items()],\n pred_dir=prediction_dir,\n gt_dir=processed_eval_labels,\n target_metric=self.eval_score_key,\n ensembler_cls=ensembler_cls,\n save_dir=_save_dir,\n )\n inference_plan = sweeper.run_postprocessing_sweep()\n return inference_plan\n",
"\"\"\"\nCopyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom pathlib import Path\n\nimport numpy as np\nimport SimpleITK as sitk\nfrom itertools import product\n\n\nfrom typing import Sequence, Union, Tuple\n\n\ndef create_circle_mask_itk(image_itk: sitk.Image,\n world_centers: Sequence[Sequence[float]],\n world_rads: Sequence[float],\n ndim: int = 3,\n ) -> sitk.Image:\n \"\"\"\n Creates an itk image with circles defined by center points and radii\n\n Args:\n image_itk: original image (used for the coordinate frame)\n world_centers: Sequence of center points in world coordiantes (x, y, z)\n world_rads: Sequence of radii to use\n ndim: number of spatial dimensions\n\n Returns:\n sitk.Image: mask with circles\n \"\"\"\n image_np = sitk.GetArrayFromImage(image_itk)\n min_spacing = min(image_itk.GetSpacing())\n\n if image_np.ndim > ndim:\n image_np = image_np[0]\n mask_np = np.zeros_like(image_np).astype(np.uint8)\n\n for _id, (world_center, world_rad) in enumerate(zip(world_centers, world_rads), start=1):\n check_rad = (world_rad / min_spacing) * 1.5 # add some buffer to it\n bounds = []\n center = image_itk.TransformPhysicalPointToContinuousIndex(world_center)[::-1]\n for ax, c in enumerate(center):\n bounds.append((\n max(0, int(c - check_rad)),\n min(mask_np.shape[ax], int(c + check_rad)),\n ))\n coord_box = product(*[list(range(b[0], b[1])) for b in bounds])\n\n # loop over every pixel position\n for coord in coord_box:\n world_coord = image_itk.TransformIndexToPhysicalPoint(tuple(reversed(coord))) # reverse order to x, y, z for sitk\n dist = np.linalg.norm(np.array(world_coord) - np.array(world_center))\n if dist <= world_rad:\n mask_np[tuple(coord)] = _id\n assert mask_np.max() == _id\n\n mask_itk = sitk.GetImageFromArray(mask_np)\n mask_itk.SetOrigin(image_itk.GetOrigin())\n mask_itk.SetDirection(image_itk.GetDirection())\n mask_itk.SetSpacing(image_itk.GetSpacing())\n return mask_itk\n\n\ndef load_sitk(path: Union[Path, str], **kwargs) -> sitk.Image:\n \"\"\"\n Functional interface to load image with sitk\n\n Args:\n path: path to file to load\n\n Returns:\n sitk.Image: loaded sitk image\n \"\"\"\n return sitk.ReadImage(str(path), **kwargs)\n\n\ndef load_sitk_as_array(path: Union[Path, str], **kwargs) -> Tuple[np.ndarray, dict]:\n \"\"\"\n Functional interface to load sitk image and convert it to an array\n\n Args:\n path: path to file to load\n\n Returns:\n np.ndarray: loaded image data\n dict: loaded meta data\n \"\"\"\n img_itk = load_sitk(path, **kwargs)\n meta = {key: img_itk.GetMetaData(key) for key in img_itk.GetMetaDataKeys()}\n return sitk.GetArrayFromImage(img_itk), meta\n",
"\"\"\"\nCopyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom torch import Tensor\nfrom loguru import logger\n\nfrom nndet.losses.base import reduction_helper\nfrom nndet.utils import make_onehot_batch\n\n\ndef one_hot_smooth(data,\n num_classes: int,\n smoothing: float = 0.0,\n ):\n targets = torch.empty(size=(*data.shape, num_classes), device=data.device)\\\n .fill_(smoothing / num_classes)\\\n .scatter_(-1, data.long().unsqueeze(-1), 1. - smoothing)\n return targets\n\n\[email protected]\ndef focal_loss_with_logits(\n logits: torch.Tensor,\n target: torch.Tensor, gamma: float,\n alpha: float = -1,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n \"\"\"\n Focal loss\n https://arxiv.org/abs/1708.02002\n\n Args:\n logits: predicted logits [N, dims]\n target: (float) binary targets [N, dims]\n gamma: balance easy and hard examples in focal loss\n alpha: balance positive and negative samples [0, 1] (increasing\n alpha increase weight of foreground classes (better recall))\n reduction: 'mean'|'sum'|'none'\n mean: mean of loss over entire batch\n sum: sum of loss over entire batch\n none: no reduction\n\n Returns:\n torch.Tensor: loss\n\n See Also\n :class:`BFocalLossWithLogits`, :class:`FocalLossWithLogits`\n \"\"\"\n bce_loss = F.binary_cross_entropy_with_logits(logits, target, reduction='none')\n\n p = torch.sigmoid(logits)\n pt = (p * target + (1 - p) * (1 - target))\n\n focal_term = (1. 
- pt).pow(gamma)\n loss = focal_term * bce_loss\n\n if alpha >= 0:\n alpha_t = (alpha * target + (1 - alpha) * (1 - target))\n loss = alpha_t * loss\n\n return reduction_helper(loss, reduction=reduction)\n\n\nclass FocalLossWithLogits(nn.Module):\n def __init__(self,\n gamma: float = 2,\n alpha: float = -1,\n reduction: str = \"sum\",\n loss_weight: float = 1.,\n ):\n \"\"\"\n Focal loss with multiple classes (uses one hot encoding and sigmoid)\n\n Args:\n gamma: balance easy and hard examples in focal loss\n alpha: balance positive and negative samples [0, 1] (increasing\n alpha increase weight of foreground classes (better recall))\n reduction: 'mean'|'sum'|'none'\n mean: mean of loss over entire batch\n sum: sum of loss over entire batch\n none: no reduction\n loss_weight: scalar to balance multiple losses\n \"\"\"\n super().__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n def forward(self,\n logits: torch.Tensor,\n targets: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Compute loss\n\n Args:\n logits: predicted logits [N, C, dims], where N is the batch size,\n C number of classes, dims are arbitrary spatial dimensions\n (background classes should be located at channel 0 if\n ignore background is enabled)\n targets: targets encoded as numbers [N, dims], where N is the\n batch size, dims are arbitrary spatial dimensions\n\n Returns:\n torch.Tensor: loss\n \"\"\"\n n_classes = logits.shape[1] + 1\n target_onehot = make_onehot_batch(targets, n_classes=n_classes).float()\n target_onehot = target_onehot[:, 1:]\n\n return self.loss_weight * focal_loss_with_logits(\n logits, target_onehot,\n gamma=self.gamma,\n alpha=self.alpha,\n reduction=self.reduction,\n )\n\n\nclass BCEWithLogitsLossOneHot(torch.nn.BCEWithLogitsLoss):\n def __init__(self,\n *args,\n num_classes: int,\n smoothing: float = 0.0,\n loss_weight: float = 1.,\n **kwargs,\n ):\n \"\"\"\n BCE loss with one hot encoding of targets\n\n Args:\n num_classes: number of classes\n smoothing: label smoothing\n loss_weight: scalar to balance multiple losses\n \"\"\"\n super().__init__(*args, **kwargs)\n self.smoothing = smoothing\n if smoothing > 0:\n logger.info(f\"Running label smoothing with smoothing: {smoothing}\")\n self.num_classes = num_classes\n self.loss_weight = loss_weight\n\n def forward(self,\n input: Tensor,\n target: Tensor,\n ) -> Tensor:\n \"\"\"\n Compute bce loss based on one hot encoding\n\n Args:\n input: logits for all foreground classes [N, C]\n N is the number of anchors, and C is the number of foreground\n classes\n target: target classes. 0 is treated as background, >0 are\n treated as foreground classes. [N] is the number of anchors\n\n Returns:\n Tensor: final loss\n \"\"\"\n target_one_hot = one_hot_smooth(\n target, num_classes=self.num_classes + 1, smoothing=self.smoothing) # [N, C + 1]\n target_one_hot = target_one_hot[:, 1:] # background is implicitly encoded\n\n return self.loss_weight * super().forward(input, target_one_hot.float())\n\n\nclass CrossEntropyLoss(torch.nn.CrossEntropyLoss):\n def __init__(self,\n *args,\n loss_weight: float = 1.,\n **kwargs,\n ) -> None:\n \"\"\"\n Same as CE from pytorch with additional loss weight for uniform API\n \"\"\"\n super().__init__(*args, **kwargs)\n self.loss_weight = loss_weight\n\n def forward(self,\n input: Tensor,\n target: Tensor,\n ) -> Tensor:\n \"\"\"\n Same as CE from pytorch\n \"\"\"\n return self.loss_weight * super().forward(input, target)\n",
"\"\"\"\nCopyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport time\nimport numpy as np\n\nfrom pathlib import Path\nfrom loguru import logger\nfrom typing import Sequence, List, Dict, Any, Tuple\n\n\nimport matplotlib.pyplot as plt\n\nfrom nndet.evaluator import DetectionMetric\n\n\nclass PredictionHistogram(DetectionMetric):\n def __init__(self,\n classes: Sequence[str], save_dir: Path,\n iou_thresholds: Sequence[float] = (0.1, 0.5),\n bins: int = 50):\n \"\"\"\n Class to compute prediction histograms. (Note: this class does not\n provide any scalar metrics)\n\n Args:\n classes: name of each class (index needs to correspond to predicted class indices!)\n save_dir: directory where histograms are saved to\n iou_thresholds: IoU thresholds for which FROC is evaluated\n bins: number of bins of histogram\n \"\"\"\n self.classes = classes\n self.save_dir = save_dir\n\n self.iou_thresholds = iou_thresholds\n self.bins = bins\n\n def get_iou_thresholds(self) -> Sequence[float]:\n \"\"\"\n Return IoU thresholds needed for this metric in an numpy array\n\n Returns:\n Sequence[float]: IoU thresholds [M], M is the number of thresholds\n \"\"\"\n return self.iou_thresholds\n\n def compute(self, results_list: List[Dict[int, Dict[str, np.ndarray]]]) -> Tuple[\n Dict[str, float], Dict[str, Dict[str, Any]]]:\n \"\"\"\n Plot class independent and per class histograms. For more info see\n `method``plot_hist`\n\n Args:\n Dict: results over dataset\n \"\"\"\n self.plot_hist(results_list=results_list)\n for cls_idx, cls_str in enumerate(self.classes):\n # filter current class from list of results and put them into a dict with a single entry\n results_by_cls = [{0: r[cls_idx]} for r in results_list if cls_idx in r if cls_idx in r]\n self.plot_hist(results_by_cls, title_prefix=f\"cl_{cls_str}_\")\n return {}, {}\n\n def plot_hist(self, results_list: List[Dict[int, Dict[str, np.ndarray]]],\n title_prefix: str = \"\") -> Tuple[\n Dict[str, float], Dict[str, Dict[str, Any]]]:\n \"\"\"\n Compute prediction histograms for multiple IoU values\n\n Args:\n results_list (List[Dict[int, Dict[str, np.ndarray]]]): list with result s per image (in list)\n per category (dict). 
Inner Dict contains multiple results obtained by :func:`box_matching_batch`.\n `dtMatches`: matched detections [T, G], where T = number of thresholds, G = number of ground truth\n `gtMatches`: matched ground truth boxes [T, D], where T = number of thresholds,\n D = number of detections\n `dtScores`: prediction scores [D] detection scores\n `gtIgnore`: ground truth boxes which should be ignored [G] indicate whether ground truth\n should be ignored\n `dtIgnore`: detections which should be ignored [T, D], indicate which detections should be ignored\n title_prefix: prefix for title of histogram plot\n\n Returns:\n Dict: empty\n Dict[Dict[str, Any]]: histogram informations\n `{IoU Value}`:\n `tp_hist` (np.ndarray): histogram if true positives; false negatives @ score=0 [:attr:`self.bins`]\n `fp_hist` (np.ndarray): false positive histogram [:attr:`self.bins`]\n `true_positives` (int): number of true positives according to matching\n `false_positives` (int): number of false_positives according to matching\n `false_negatives` (int): number of false_negatives according to matching\n \"\"\"\n num_images = len(results_list)\n results = [_r for r in results_list for _r in r.values()]\n\n if len(results) == 0:\n logger.warning(f\"WARNING, no results found for froc computation\")\n return {}, {}\n\n # r['dtMatches'] [T, R], where R = sum(all detections)\n dt_matches = np.concatenate([r['dtMatches'] for r in results], axis=1)\n dt_ignores = np.concatenate([r['dtIgnore'] for r in results], axis=1)\n dt_scores = np.concatenate([r['dtScores'] for r in results])\n gt_ignore = np.concatenate([r['gtIgnore'] for r in results])\n self.check_number_of_iou(dt_matches, dt_ignores)\n \n num_gt = np.count_nonzero(gt_ignore == 0) # number of ground truth boxes (non ignored)\n if num_gt == 0:\n logger.error(\"No ground truth found! Returning nothing.\")\n return {}, {}\n\n for iou_idx, iou_val in enumerate(self.iou_thresholds):\n # filter scores with ignores detections\n _scores = dt_scores[np.logical_not(dt_ignores[iou_idx])]\n assert len(_scores) == len(dt_matches[iou_idx])\n _ = self.compute_histogram_one_iou(\\\n dt_matches[iou_idx], _scores, num_images, num_gt, iou_val, title_prefix)\n return {}, {}\n\n def compute_histogram_one_iou(self, dt_matches: np.ndarray, dt_scores: np.ndarray,\n num_images: int, num_gt: int, iou: float,\n title_prefix: str):\n \"\"\"\n Plot prediction histogram\n \n Args:\n dt_matches (np.ndarray): binary array indicating which bounding\n boxes have a large enough overlap with gt;\n [R] where R is the number of predictions\n dt_scores (np.ndarray): prediction score for each bounding box;\n [R] where R is the number of predictions\n num_images (int): number of images\n num_gt (int): number of ground truth bounding boxes\n iou: IoU values which is currently evaluated\n title_prefix: prefix for title of histogram plot\n \"\"\"\n num_matched = np.sum(dt_matches)\n false_negatives = num_gt - num_matched # false negatives\n true_positives = np.sum(dt_matches)\n false_positives = np.sum(dt_matches == 0)\n\n _dt_matches = np.concatenate([dt_matches, [1] * int(false_negatives)])\n _dt_scores = np.concatenate([dt_scores, [0] * int(false_negatives)])\n\n plt.figure()\n plt.yscale('log')\n if 0 in dt_matches:\n plt.hist(_dt_scores[_dt_matches == 0], bins=self.bins, range=(0., 1.), \n alpha=0.3, color='g', label='false pos.')\n if 1 in dt_matches:\n plt.hist(_dt_scores[_dt_matches == 1], bins=self.bins, range=(0., 1.),\n alpha=0.3, color='b', label='true pos. (false neg. 
@ score=0)')\n plt.legend()\n title = title_prefix + (f\"tp:{true_positives} fp:{false_positives} \"\n f\"fn:{false_negatives} pos:{true_positives+false_negatives}\")\n plt.title(title)\n plt.xlabel('confidence score')\n plt.ylabel('log n')\n\n if self.save_dir is not None:\n save_path = self.save_dir / (f\"{title_prefix}pred_hist_IoU@{iou}\".replace(\".\", \"_\") + \".png\")\n logger.info(f\"Saving {save_path}\")\n plt.savefig(save_path)\n plt.close()\n return None\n"
] | [
[
"torch.no_grad",
"numpy.mean",
"torch.optim.SGD"
],
[
"numpy.array",
"numpy.zeros_like"
],
[
"torch.sigmoid",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.empty"
],
[
"matplotlib.pyplot.legend",
"numpy.logical_not",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"matplotlib.pyplot.ylabel",
"numpy.count_nonzero",
"matplotlib.pyplot.close",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
isabella232/nnabla | [
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79",
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79",
"62a21db4afc15c52ce43f3f5b87e5fa4181b2deb",
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79",
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79",
"82a3c6fed382f889d1a4a429c696bb8cedf6ce79"
] | [
"python/test/function/test_affine.py",
"python/test/solver/test_adam.py",
"python/benchmark/function/test_cumprod.py",
"python/src/nnabla/experimental/graph_converters/channel_last.py",
"python/test/function/test_mul_n.py",
"python/test/function/test_categorical_cross_entropy.py"
] | [
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Affine')\n\n\ndef ref_affine(x, w, b, base_axis):\n shape = list(x.shape[:base_axis])\n shape += [-1]\n out_shape = w.shape[1:]\n y = np.dot(x.reshape(*shape), w.reshape(w.shape[0], -1))\n if b is not None:\n y += b.reshape((1,) * (len(shape) - 1) + (-1,))\n return y.reshape(tuple(shape[:-1]) + tuple(out_shape))\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"base_axis, weight_shape\",\n [(1, (12, 2, 3)), (2, (4, 4)), (-1, (4, 4)), (-2, (12, 3, 4))])\[email protected](\"bias\", [True, False])\ndef test_affine_forward_backward(seed, base_axis, weight_shape, bias,\n ctx, func_name):\n\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n # Input\n inputs = [rng.randn(2, 3, 4).astype(np.float32)]\n # Weight\n inputs += [rng.randn(*weight_shape).astype(np.float32)]\n # Bias\n if bias:\n inputs += [rng.randn(*weight_shape[1:]).astype(np.float32)]\n else:\n inputs += [None]\n function_tester(rng, F.affine, ref_affine, inputs, func_args=[base_axis],\n atol_b=1e-2, dstep=1e-3, ctx=ctx, func_name=func_name)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"base_axis, weight_shape\",\n [(1, (12, 3, 4)), (2, (4, 4)), (-1, (4, 4)), (-2, (12, 3, 4))])\[email protected](\"bias\", [True, False])\ndef test_affine_double_backward(seed, base_axis, weight_shape, bias,\n ctx, func_name):\n\n from nbla_test_utils import backward_function_tester, grad_function_forward_function_output\n from nnabla.backward_function.affine import AffineDataGrad, AffineFilterGrad\n rng = np.random.RandomState(seed)\n # Input\n inputs = [rng.randn(2, 3, 4).astype(np.float32)]\n # Weight\n inputs += [rng.randn(*weight_shape).astype(np.float32)]\n # Bias\n if bias:\n inputs += [rng.randn(*weight_shape[1:]).astype(np.float32) * 1e2]\n else:\n inputs += [None]\n func_args = [base_axis]\n # Affine\n backward_function_tester(rng, F.affine, inputs, func_args=func_args,\n dstep=1e-3, ctx=ctx)\n # DataGrad\n df, y = grad_function_forward_function_output(AffineDataGrad,\n F.affine, ctx, inputs, *func_args)\n df.xshape = inputs[0].shape\n ginputs = [rng.randn(*y.shape), inputs[1]]\n backward_function_tester(rng, df, ginputs, func_args=[],\n atol_accum=2e-2, dstep=1e-3, ctx=ctx, non_accum_check=True)\n\n # FilterGrad\n df, y = grad_function_forward_function_output(AffineFilterGrad,\n F.affine, ctx, inputs, *func_args)\n df.wshape = inputs[1].shape\n ginputs = [rng.randn(*y.shape), inputs[0]]\n backward_function_tester(rng, df, ginputs, func_args=[],\n dstep=1e-3, ctx=ctx, non_accum_check=True)\n",
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport nnabla.solvers as S\nimport numpy as np\nfrom solver_test_utils import solver_tester, RefSolver\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Adam')\n\n\nclass RefAdam(RefSolver):\n\n def __init__(self, alpha, beta1, beta2, eps):\n super().__init__()\n self.alpha = alpha\n self.beta1 = beta1\n self.beta2 = beta2\n self.eps = eps\n self.m = {}\n self.v = {}\n self.t = {}\n\n def _set_state_impl(self, key, param):\n self.m[key] = np.zeros_like(param)\n self.v[key] = np.zeros_like(param)\n self.t[key] = 0\n\n def _update_impl(self, key, p, g):\n self.t[key] = min(self.t[key] + 1, np.iinfo(np.int32).max)\n _update_adam(p, g, self.m[key], self.v[key], self.t[key],\n self.alpha, self.beta1, self.beta2, self.eps)\n\n\ndef _update_adam(p, g, m, v, t, alpha, beta1, beta2, eps):\n alpha_t = alpha * \\\n np.sqrt(1. - beta2 ** t) / (1. - beta1 ** t)\n m[...] = beta1 * m + (1 - beta1) * g\n v[...] = beta2 * v + (1 - beta2) * g * g\n p[...] = p - alpha_t * m / (np.sqrt(v) + eps)\n\n\[email protected](\"ctx, solver_name\", ctxs)\[email protected](\"decay\", [1e-4])\[email protected](\"alpha\", [1e-2, 1e-4])\[email protected](\"beta1, beta2\", [(0.9, 0.999), (0.999, 0.9)])\[email protected](\"eps\", [1e-8])\[email protected](\"seed\", [313])\ndef test_adam(seed, alpha, beta1, beta2, eps, decay, ctx, solver_name):\n rng = np.random.RandomState(seed)\n solver_tester(\n rng, S.Adam, RefAdam, [alpha, beta1, beta2, eps], atol=1e-6,\n ctx=ctx, solver_name=solver_name)\n",
"# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport numpy as np\nimport nnabla.functions as F\n\nfrom function_benchmark import FunctionBenchmark, Inspec\n\n\nclass Case:\n def __init__(self, shape, axis, rtol=1e-6):\n # rtol (relative tolerance) 1e-6 is default for assert_allclose\n self.shape = shape\n self.axis = axis\n self.rtol = rtol\n\n # Print this message by pytest when a test fails.\n def __repr__(self):\n return 'Case(shape=' + str(self.shape) + \\\n ' axes=' + str(self.axis) + \\\n ', rtol=' + str(self.rtol) + ')'\n\n\ntest_cases = [\n # --------------------------------\n # Common use case\n # --------------------------------\n # Axis 0\n Case((512, 512), 0),\n Case((512, 1024), 0),\n Case((512, 2048), 0),\n Case((1024, 512), 0),\n Case((1024, 1024), 0),\n Case((1024, 2048), 0),\n Case((2048, 512), 0),\n Case((2048, 1024), 0),\n Case((2048, 2048), 0),\n # Axis 1\n Case((512, 512), 1),\n Case((512, 1024), 1),\n Case((512, 2048), 1),\n Case((1024, 512), 1),\n Case((1024, 1024), 1),\n Case((1024, 2048), 1),\n Case((2048, 512), 1),\n Case((2048, 1024), 1),\n Case((2048, 2048), 1),\n\n # --------------------------------\n # Large cases\n # --------------------------------\n Case((1024*1024, 32), 1),\n Case((32, 1024*1024), 0),\n Case((2048, 2048), 1),\n Case((2048, 2048), 0),\n Case((2024*2024, 2), 0),\n Case((2, 2024*2024), 1),\n\n # Weak cases\n # PyTorch uses Cub library in these cases.\n Case((2024*2024, 1), 0),\n Case((1, 2024*2024), 1),\n]\n\n\ndef create_cumprod_input(rng, shape, axis, with_mask):\n x = (rng.randn(*shape)).astype(np.float32)\n if with_mask:\n # Make zero elements with the probability of `1 / x_shape[axis]`.\n # It is the probability of existence of one zero element in each scan axis.\n mask = rng.rand(*shape) > (1.0 / shape[axis])\n x = x * mask\n return x\n\n\[email protected](\"seed\", [123])\[email protected](\"test_case\", test_cases)\[email protected]('exclusive', [False, True])\[email protected]('reverse', [False, True])\[email protected](\"with_mask\", [True, False])\ndef test_cumprod(seed, test_case, exclusive, reverse, with_mask, nnabla_opts):\n x_shape = test_case.shape\n axis = test_case.axis\n\n def init(shape):\n rng = np.random.RandomState(seed)\n return create_cumprod_input(rng, shape, axis, with_mask)\n need_grad = True\n\n inputs = [Inspec(x_shape, init, need_grad)]\n\n func_kwargs = dict(\n axis=axis,\n exclusive=exclusive,\n reverse=reverse,\n )\n fb = FunctionBenchmark(\n F.cumprod, inputs, [], func_kwargs,\n nnabla_opts.ext, nnabla_opts.ext_kwargs)\n fb.benchmark()\n fb.write(writer=nnabla_opts.function_benchmark_writer)\n",
"# Copyright 2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nnabla as nn\nimport nnabla.functions as F\nimport numpy as np\n\nfrom .graph_converter import FunctionModifier\n\n\nclass ChannelLastModifier(FunctionModifier):\n \"\"\"\n Convert graph shape from Channel first (NCHW) to Channel last (NHWC) format.\n\n Supported functions: `Convolution`, `Deconvolution`, `BatchNormalization`,\n `MaxPooling`, `AveragePooling`, `SumPooling`, `Unpooling`, `Concatenate`\n\n Args:\n inputs (list of nn.Variable): Original very begining inputs (NCHW) of a network.\n inputs_cl (list of nn.Variable): Channel last version of very begining inputs (NHWC) of a network.\n If this is not given, `inputs_cl` are generated internally and holded.\n\n Examples:\n\n .. code-block:: python\n\n pred = Model(...)\n\n import nnabla.experimental.graph_converters as GC\n\n modifiers = [GC.ChannelLastModifier(<inputs of pred>)]\n gc = GC.GraphConverter(modifiers)\n pred = gc.convert(pred)\n\n \"\"\"\n\n def __init__(self, inputs, inputs_cl=None):\n super(ChannelLastModifier, self).__init__()\n\n self._inputs = inputs\n self._inputs_cl = inputs_cl\n\n self._prepare_inputs(inputs, inputs_cl)\n\n def _prepare_inputs(self, inputs, inputs_cl=None):\n if inputs_cl is None:\n inputs_cl = []\n for inp in inputs:\n b, c, h, w = inp.shape\n x = nn.Variable([b, h, w, c])\n x.d = inp.d.copy().transpose([0, 2, 3, 1])\n inputs_cl.append(x)\n self.inputs_cl = inputs_cl\n\n # Replace the very begining of input\n for inp, inp_cl in zip(inputs, inputs_cl):\n f = inp.function_references[0]\n self.init_map_func_inputs(f, [inp_cl])\n\n def connect(self, fname, inputs, args):\n if fname in ['Convolution', 'Deconvolution']:\n # TODO: address leading batch dimension\n args['channel_last'] = True\n x = inputs[0]\n w = inputs[1]\n b = inputs[2] if len(inputs) == 3 else None\n scope = self.get_parameter_scope(w)\n with nn.parameter_scope(scope):\n wd = w.d.copy().transpose(0, 2, 3, 1)\n w = nn.parameter.get_parameter_or_create('W_cl', wd.shape, wd)\n o = F.convolution(x, w, b, **args)\n elif fname == 'BatchNormalization':\n # TODO: address leading batch dimension\n x = inputs[0]\n beta = inputs[1]\n gamma = inputs[2]\n mean = inputs[3]\n var = inputs[4]\n args['axes'] = [len(x.shape) - 1]\n if 'no_scale' in args:\n del args['no_scale']\n if 'no_bias' in args:\n del args['no_bias']\n scope = self.get_parameter_scope(beta)\n with nn.parameter_scope(scope):\n beta_d = beta.d.copy().transpose(0, 2, 3, 1)\n gamma_d = gamma.d.copy().transpose(0, 2, 3, 1)\n mean_d = mean.d.copy().transpose(0, 2, 3, 1)\n var_d = var.d.copy().transpose(0, 2, 3, 1)\n beta = nn.parameter.get_parameter_or_create(\n 'beta_cl', beta_d.shape, beta_d, beta.need_grad)\n gamma = nn.parameter.get_parameter_or_create(\n 'gamma_cl', gamma_d.shape, gamma_d, gamma.need_grad)\n mean = nn.parameter.get_parameter_or_create(\n 'mean_cl', mean_d.shape, mean_d, mean.need_grad)\n var = 
nn.parameter.get_parameter_or_create(\n 'var_cl', var_d.shape, var_d, var.need_grad)\n o = F.batch_normalization(x, beta, gamma, mean, var, **args)\n elif fname in ['MaxPooling', 'AveragePooling', 'SumPooling']:\n args['channel_last'] = True\n o = self._call_function(fname, inputs, args)\n elif fname in ['Concatenate']:\n args['axis'] = len(inputs[0].shape) - 1\n o = self._call_function(fname, inputs, args)\n elif fname == 'Affine':\n x = inputs[0]\n\n _, h_s, w_s, c_s = inputs[0].shape\n _, b_s = inputs[1].shape\n wd = inputs[1].d.copy()\n wd = np.reshape(wd, (c_s, h_s, w_s, b_s))\n wd = np.transpose(wd, (1, 2, 0, 3))\n wd = np.reshape(wd, (-1, b_s))\n w = nn.parameter.get_parameter_or_create(\n 'w_cl', wd.shape, wd, False)\n\n b = inputs[2] if len(inputs) == 3 else None\n o = F.affine(x, w, b, **args)\n else:\n o = self._call_function(fname, inputs, args)\n return o\n\n def modify(self, f, inputs):\n fname = f.info.type_name\n args = f.info.args\n if fname in ['Convolution', 'Deconvolution',\n 'BatchNormalization',\n 'MaxPooling', 'AveragePooling', 'SumPooling', 'Unpooling',\n 'Concatenate', 'Affine']:\n o = self.connect(fname, inputs, args)\n return o\n\n def __finish__(self):\n self._prepare_inputs(self._inputs, self._inputs_cl)\n",
"# Copyright 2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context, function_tester\n\nctxs = list_context('MulN')\n\n\ndef ref_function(*inputs, **params):\n y = 1\n for i in range(len(inputs)):\n y *= inputs[i]\n return y\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [314])\[email protected]('num_inputs', [2, 3, 5])\ndef test_mul_n_forward_backward(num_inputs, seed, ctx, func_name):\n rng = np.random.RandomState(seed)\n shape0 = [2, 3, 4]\n inputs = []\n for i in range(num_inputs):\n inputs.append(rng.randn(*shape0).astype(np.float32))\n function_tester(rng, F.mul_n, ref_function, inputs,\n ctx=ctx, func_name=func_name, atol_b=2e-3)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected]('num_inputs', [2, 3, 5])\ndef test_mul_n_double_backward(num_inputs, seed, ctx, func_name):\n from nbla_test_utils import backward_function_tester\n rng = np.random.RandomState(seed)\n shape0 = [2, 3, 4]\n inputs = []\n for i in range(num_inputs):\n inputs.append(rng.randn(*shape0).astype(np.float32))\n backward_function_tester(rng, F.mul_n,\n inputs=inputs,\n func_args=[], func_kwargs={},\n atol_accum=5e-2,\n dstep=1e-3,\n ctx=ctx)\n\n\[email protected](\"seed\", [313])\[email protected](\"input_shape\", [(2, 3, 4)])\[email protected](\"n_inputs, n_active\", [(3, 1), (5, 2), (10, 6)])\ndef test_mul(n_inputs, n_active, input_shape, seed):\n from nnabla.testing import assert_allclose\n rng = np.random.RandomState(seed)\n inputs = [rng.randn(*input_shape).astype('f4') for _ in range(n_inputs)]\n active = np.random.permutation(n_inputs) < n_active\n\n y = F.mul_n(*[nn.Variable.from_numpy_array(inp).apply(need_grad=True)\n for inp in inputs])\n y.parent.set_active_input_mask(active)\n y_ref = F.mul_n(*[nn.Variable.from_numpy_array(inp).apply(need_grad=True)\n for (act, inp) in zip(active, inputs) if act])\n\n y.forward()\n y_ref.forward()\n assert_allclose(y.d, y_ref.d)\n\n for inp in y.parent.inputs + y_ref.parent.inputs:\n inp.g = 0\n\n y.backward()\n y_ref.backward()\n active_inputs = [y.parent.inputs[i] for i, act in enumerate(active) if act]\n for inp, ref in zip(active_inputs, y_ref.parent.inputs):\n assert_allclose(inp.g, ref.g)\n",
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\nfrom nnabla.testing import assert_allclose\n\nctxs = list_context('CategoricalCrossEntropy')\n\n\ndef ref_categorical_cross_entropy(x, l, axis):\n orig_x = x.copy()\n x = np.rollaxis(x, axis, x.ndim).reshape(-1, x.shape[axis])\n ll = np.rollaxis(l, axis, x.ndim).flatten()\n y = - \\\n np.log(\n np.maximum(x[np.arange(x.shape[0]), ll],\n np.finfo(np.float32).tiny))\n y[ll == -1] = 0\n return y.reshape(l.shape)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"axis\", [0, 1, 2, -1, -2, -3])\ndef test_categorical_cross_entropy_forward_backward(seed, axis, ctx, func_name):\n from nbla_test_utils import function_tester\n ishape = [2, 3, 4]\n rng = np.random.RandomState(seed)\n\n l_shape = list(ishape)\n l_shape[axis] = 1\n n_class = ishape[axis]\n\n inputs = [\n rng.rand(2, 3, 4).astype(np.float32) * 0.9 + 0.05,\n rng.randint(-1, n_class, size=l_shape).astype(np.int)]\n\n function_tester(rng, F.categorical_cross_entropy,\n ref_categorical_cross_entropy, inputs,\n atol_b=5e-2, func_args=[axis], backward=[True, False], ctx=ctx, func_name=func_name)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\[email protected](\"axis\", [0, 1, 2, -1, -2, -3])\ndef test_categorical_cross_entropy_double_backward(seed, axis, ctx, func_name):\n from nbla_test_utils import backward_function_tester\n ishape = [2, 3, 4]\n rng = np.random.RandomState(seed)\n\n l_shape = list(ishape)\n l_shape[axis] = 1\n n_class = ishape[axis]\n\n inputs = [\n rng.rand(2, 3, 4).astype(np.float32) * 5,\n rng.randint(0, n_class, size=l_shape).astype(np.int)]\n\n backward_function_tester(rng, F.categorical_cross_entropy,\n inputs,\n atol_accum=1e-1, dstep=1e-3,\n func_args=[axis],\n backward=[True, False], ctx=ctx)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [314])\[email protected](\"axis\", [0, 1, 2, -1, -2, -3])\ndef test_categorical_cross_entropy_backward_with_negative_label(seed, axis, ctx, func_name):\n from nbla_test_utils import compute_analytical_and_numerical_grad_graph\n ishape = [2, 3, 4]\n rng = np.random.RandomState(seed)\n\n l_shape = list(ishape)\n l_shape[axis] = 1\n n_class = ishape[axis]\n\n inp0 = nn.Variable.from_numpy_array(rng.rand(2, 3, 4).astype(\n np.float32) * 0.9 + 0.05).apply(need_grad=True)\n inp1 = nn.Variable.from_numpy_array(\n rng.randint(-1, n_class, size=l_shape)).apply(need_grad=False)\n out = F.sum(F.categorical_cross_entropy(inp0, inp1, axis=axis))\n out.g.fill(1.0)\n inp0.g.fill(0)\n inp1.g.fill(0)\n analytical_grad, numerical_grad = compute_analytical_and_numerical_grad_graph(\n out, [inp0, inp1], recompute_graph=True)\n numerical_grad[inp0.size:] = 0\n assert_allclose(analytical_grad, numerical_grad, 
rtol=0.01, atol=0.01)\n"
] | [
[
"numpy.random.RandomState"
],
[
"numpy.random.RandomState",
"numpy.zeros_like",
"numpy.sqrt",
"numpy.iinfo"
],
[
"numpy.random.RandomState"
],
[
"numpy.reshape",
"numpy.transpose"
],
[
"numpy.random.permutation",
"numpy.random.RandomState"
],
[
"numpy.rollaxis",
"numpy.arange",
"numpy.random.RandomState",
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cTatu/fracdiff | [
"0ee3967b98ab2e5d67dc72cc21a2543f4dc5b113"
] | [
"tests/test_fracdiffstat.py"
] | [
"import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom fracdiff import Fracdiff\nfrom fracdiff import FracdiffStat\nfrom fracdiff.stat import StatTester\n\n\nclass TestFracdiffStat:\n \"\"\"\n Test `FracdiffStat`.\n \"\"\"\n\n @staticmethod\n def _is_stat(x):\n return StatTester().is_stat(x)\n\n @pytest.mark.parametrize(\"window\", [10])\n @pytest.mark.parametrize(\"mode\", [\"full\", \"valid\"])\n @pytest.mark.parametrize(\"precision\", [0.01])\n @pytest.mark.parametrize(\"n_jobs\", [None, -1])\n def test_order(self, window, mode, precision, n_jobs):\n np.random.seed(42)\n X = np.random.randn(1000, 10).cumsum(0)\n\n fs = FracdiffStat(mode=mode, window=window, precision=precision, n_jobs=n_jobs)\n fs.fit(X)\n\n X_st = fs.transform(X)\n X_ns = np.empty_like(X_st[:, :0])\n\n for i in range(X.shape[1]):\n f = Fracdiff(fs.d_[i] - precision, mode=mode, window=window)\n X_ns = np.concatenate((X_ns, f.fit_transform(X[:, [i]])), 1)\n\n for i in range(X.shape[1]):\n assert self._is_stat(X_st[:, i])\n assert not self._is_stat(X_ns[:, i])\n\n @pytest.mark.parametrize(\"window\", [10])\n def test_lower_is_stat(self, window):\n \"\"\"\n Test if `StationarityFracdiff.fit` returns `lower`\n if `lower`th differenciation is already stationary.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 1)\n\n f = FracdiffStat(window=window, lower=0.0).fit(X)\n\n assert f.d_[0] == 0.0\n\n @pytest.mark.parametrize(\"window\", [10])\n def test_upper_is_not_stat(self, window):\n \"\"\"\n Test if `StationarityFracdiff.fit` returns `np.nan`\n if `upper`th differenciation is still non-stationary.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 1).cumsum(0)\n\n f = FracdiffStat(window=window, upper=0.0, lower=-1.0).fit(X)\n\n assert np.isnan(f.d_[0])\n\n @pytest.mark.parametrize(\"window\", [10])\n @pytest.mark.parametrize(\"mode\", [\"full\", \"valid\"])\n @pytest.mark.parametrize(\"precision\", [0.01])\n @pytest.mark.parametrize(\"n_jobs\", [None, -1])\n def test_transform(self, window, mode, precision, n_jobs):\n \"\"\"\n Test if `FracdiffStat.transform` works\n for array with n_features > 1.\n \"\"\"\n np.random.seed(42)\n X = np.random.randn(100, 10).cumsum(0)\n\n fs = FracdiffStat(\n window=window, mode=mode, precision=precision, n_jobs=n_jobs\n ).fit(X)\n out = fs.transform(X)\n\n exp = np.empty_like(out[:, :0])\n for i in range(X.shape[1]):\n f = Fracdiff(fs.d_[i], mode=mode, window=window)\n exp = np.concatenate((exp, f.fit_transform(X[:, [i]])), 1)\n\n assert_allclose(out, exp)\n"
] | [
[
"numpy.random.seed",
"numpy.isnan",
"numpy.empty_like",
"numpy.random.randn",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
function2-llx/MONAI | [
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"2fef7ff5c064a9ff6b6d6b4f2323180afed99934",
"e0db5a564225a7cb62e7a23df97267019006302f",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"e0db5a564225a7cb62e7a23df97267019006302f",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"e0db5a564225a7cb62e7a23df97267019006302f",
"e0db5a564225a7cb62e7a23df97267019006302f",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"2fef7ff5c064a9ff6b6d6b4f2323180afed99934",
"2fef7ff5c064a9ff6b6d6b4f2323180afed99934",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4",
"4cddaa830b61b88ec78e089bb5f21e05bb1a78f4"
] | [
"tests/test_hilbert_transform.py",
"tests/test_fill_holesd.py",
"tests/test_dataloader.py",
"tests/test_image_rw.py",
"tests/test_vote_ensemble.py",
"monai/engines/evaluator.py",
"monai/networks/nets/swin_unetr.py",
"tests/test_prepare_batch_extra_input.py",
"tests/test_handler_garbage_collector.py",
"monai/apps/detection/utils/box_coder.py",
"tests/test_nrrd_reader.py",
"tests/test_dynunet.py",
"tests/test_deepgrow_dataset.py",
"tests/test_rand_axis_flipd.py",
"tests/test_grid_splitd.py",
"tests/test_rand_spatial_crop_samplesd.py",
"tests/test_dints_network.py"
] | [
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks.layers import HilbertTransform\nfrom monai.utils import OptionalImportError\nfrom tests.utils import SkipIfModule, SkipIfNoModule, skip_if_no_cuda\n\n\ndef create_expected_numpy_output(input_datum, **kwargs):\n\n x = np.fft.fft(input_datum.cpu().numpy() if input_datum.device.type == \"cuda\" else input_datum.numpy(), **kwargs)\n f = np.fft.fftfreq(x.shape[kwargs[\"axis\"]])\n u = np.heaviside(f, 0.5)\n new_dims_before = kwargs[\"axis\"]\n new_dims_after = len(x.shape) - kwargs[\"axis\"] - 1\n for _ in range(new_dims_before):\n u = np.expand_dims(u, 0)\n for _ in range(new_dims_after):\n u = np.expand_dims(u, -1)\n ht = np.fft.ifft(x * 2 * u, axis=kwargs[\"axis\"])\n\n return ht\n\n\ncpu = torch.device(\"cpu\")\nn_samples = 500\nhann_windowed_sine = np.sin(2 * np.pi * 10 * np.linspace(0, 1, n_samples)) * np.hanning(n_samples)\n\n# CPU TEST DATA\n\ncpu_input_data = {}\ncpu_input_data[\"1D\"] = torch.as_tensor(hann_windowed_sine, device=cpu).unsqueeze(0).unsqueeze(0)\ncpu_input_data[\"2D\"] = (\n torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0).unsqueeze(0)\n)\ncpu_input_data[\"3D\"] = (\n torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu)\n .unsqueeze(0)\n .unsqueeze(0)\n)\ncpu_input_data[\"1D 2CH\"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=cpu).unsqueeze(0)\ncpu_input_data[\"2D 2CH\"] = torch.as_tensor(\n np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=cpu\n).unsqueeze(0)\n\n# SINGLE-CHANNEL CPU VALUE TESTS\n\nTEST_CASE_1D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"1D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"1D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_2D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"2D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"2D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_3D_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"3D\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"3D\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\n# MULTICHANNEL CPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS\n\nTEST_CASE_1D_2CH_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"1D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"1D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\nTEST_CASE_2D_2CH_SINE_CPU = [\n {}, # args (empty, so use default)\n cpu_input_data[\"2D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(cpu_input_data[\"2D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n]\n\n# GPU TEST DATA\n\nif 
torch.cuda.is_available():\n gpu = torch.device(\"cuda\")\n\n gpu_input_data = {}\n gpu_input_data[\"1D\"] = torch.as_tensor(hann_windowed_sine, device=gpu).unsqueeze(0).unsqueeze(0)\n gpu_input_data[\"2D\"] = (\n torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0).unsqueeze(0)\n )\n gpu_input_data[\"3D\"] = (\n torch.as_tensor(np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu)\n .unsqueeze(0)\n .unsqueeze(0)\n )\n gpu_input_data[\"1D 2CH\"] = torch.as_tensor(np.stack([hann_windowed_sine] * 10, axis=1), device=gpu).unsqueeze(0)\n gpu_input_data[\"2D 2CH\"] = torch.as_tensor(\n np.stack([np.stack([hann_windowed_sine] * 10, axis=1)] * 10, axis=2), device=gpu\n ).unsqueeze(0)\n\n # SINGLE CHANNEL GPU VALUE TESTS\n\n TEST_CASE_1D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"1D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"1D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_2D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"2D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"2D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_3D_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"3D\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"3D\"], axis=2), # Expected output: FFT of signal\n 1e-5, # absolute tolerance\n ]\n\n # MULTICHANNEL GPU VALUE TESTS, PROCESS ALONG FIRST SPATIAL AXIS\n\n TEST_CASE_1D_2CH_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"1D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"1D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n ]\n\n TEST_CASE_2D_2CH_SINE_GPU = [\n {}, # args (empty, so use default)\n gpu_input_data[\"2D 2CH\"], # Input data: Random 1D signal\n create_expected_numpy_output(gpu_input_data[\"2D 2CH\"], axis=2),\n 1e-5, # absolute tolerance\n ]\n\n# TESTS CHECKING PADDING, AXIS SELECTION ETC ARE COVERED BY test_detect_envelope.py\n\n\n@SkipIfNoModule(\"torch.fft\")\nclass TestHilbertTransformCPU(unittest.TestCase):\n @parameterized.expand(\n [\n TEST_CASE_1D_SINE_CPU,\n TEST_CASE_2D_SINE_CPU,\n TEST_CASE_3D_SINE_CPU,\n TEST_CASE_1D_2CH_SINE_CPU,\n TEST_CASE_2D_2CH_SINE_CPU,\n ]\n )\n def test_value(self, arguments, image, expected_data, atol):\n result = HilbertTransform(**arguments)(image)\n result = result.squeeze(0).squeeze(0).numpy()\n np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)\n\n\n@skip_if_no_cuda\n@SkipIfNoModule(\"torch.fft\")\nclass TestHilbertTransformGPU(unittest.TestCase):\n @parameterized.expand(\n []\n if not torch.cuda.is_available()\n else [\n TEST_CASE_1D_SINE_GPU,\n TEST_CASE_2D_SINE_GPU,\n TEST_CASE_3D_SINE_GPU,\n TEST_CASE_1D_2CH_SINE_GPU,\n TEST_CASE_2D_2CH_SINE_GPU,\n ],\n skip_on_empty=True,\n )\n def test_value(self, arguments, image, expected_data, atol):\n result = HilbertTransform(**arguments)(image)\n result = result.squeeze(0).squeeze(0).cpu().numpy()\n np.testing.assert_allclose(result, expected_data.squeeze(), atol=atol)\n\n\n@SkipIfModule(\"torch.fft\")\nclass TestHilbertTransformNoFFTMod(unittest.TestCase):\n def test_no_fft_module_error(self):\n self.assertRaises(OptionalImportError, HilbertTransform(), torch.randn(1, 1, 10))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import FillHolesd\nfrom monai.utils.enums import CommonKeys\nfrom tests.utils import TEST_NDARRAYS, assert_allclose, clone\n\ngrid_1_raw = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]\n\ngrid_2_raw = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]\n\ngrid_3_raw = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]\n\ngrid_4_raw = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]\n\ngrid_1 = torch.tensor([grid_1_raw])\n\ngrid_2 = torch.tensor([grid_2_raw])\n\ngrid_3 = torch.tensor([grid_3_raw])\n\ngrid_4 = torch.tensor([grid_4_raw])\n\ngrid_5 = torch.tensor([[[1, 1, 1], [1, 0, 0], [1, 1, 1]]])\n\ngrid_6 = torch.tensor([[[1, 1, 2, 2, 2], [1, 0, 2, 0, 2], [1, 1, 2, 2, 2]]])\n\ngrid_7 = torch.tensor([[[1, 1, 2, 2, 2], [1, 0, 2, 2, 2], [1, 1, 2, 2, 2]]])\n\nTEST_CASE_0 = [\"enclosed_default_full_connectivity_default_applied_labels\", {}, grid_1, grid_3]\n\nTEST_CASE_1 = [\"enclosed_full_connectivity_default_applied_labels\", {\"connectivity\": 2}, grid_1, grid_3]\n\nTEST_CASE_2 = [\n \"enclosed_full_connectivity_applied_labels_same_single\",\n {\"connectivity\": 2, \"applied_labels\": 1},\n grid_1,\n grid_3,\n]\n\nTEST_CASE_3 = [\n \"enclosed_full_connectivity_applied_labels_same_list\",\n {\"connectivity\": 2, \"applied_labels\": [1]},\n grid_1,\n grid_3,\n]\n\nTEST_CASE_4 = [\n \"enclosed_full_connectivity_applied_labels_other_single\",\n {\"connectivity\": 2, \"applied_labels\": 2},\n grid_1,\n grid_1,\n]\n\nTEST_CASE_5 = [\n \"enclosed_full_connectivity_applied_labels_other_list\",\n {\"connectivity\": 2, \"applied_labels\": [2]},\n grid_1,\n grid_1,\n]\n\nTEST_CASE_6 = [\n \"enclosed_full_connectivity_applied_labels_same_and_other\",\n {\"connectivity\": 2, \"applied_labels\": [1, 2]},\n grid_1,\n grid_3,\n]\n\nTEST_CASE_7 = [\"enclosed_connectivity_1_default_applied_labels\", {\"connectivity\": 1}, grid_1, grid_3]\n\nTEST_CASE_8 = [\"enclosed_connectivity_1_default_applied_labels\", {\"connectivity\": 1}, grid_2, grid_4]\n\nTEST_CASE_9 = [\"open_full_connectivity_default_applied_labels\", {\"connectivity\": 2}, grid_2, grid_2]\n\nTEST_CASE_10 = [\"open_to_edge_connectivity_1_default_applied_labels\", {\"connectivity\": 1}, grid_5, grid_5]\n\nTEST_CASE_11 = [\"open_to_other_label_connectivity_1_default_applied_labels\", {\"connectivity\": 1}, grid_6, grid_7]\n\nTEST_CASE_12 = [\n \"open_to_other_label_connectivity_1_applied_labels_other\",\n {\"connectivity\": 1, \"applied_labels\": 1},\n grid_6,\n grid_6,\n]\n\nTEST_CASE_13 = [\n \"numpy_enclosed_default_full_connectivity_default_applied_labels\",\n {},\n grid_1.cpu().numpy(),\n grid_3.cpu().numpy(),\n]\n\nTEST_CASE_14 = [\n \"3D_enclosed_full_connectivity_default_applied_labels\",\n {\"connectivity\": 3},\n torch.tensor([[grid_3_raw, grid_1_raw, grid_3_raw]]),\n torch.tensor([[grid_3_raw, grid_3_raw, grid_3_raw]]),\n]\n\nTEST_CASE_15 = [\n \"3D_enclosed_connectivity_1_default_applied_labels\",\n {\"connectivity\": 1},\n 
torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),\n torch.tensor([[grid_4_raw, grid_4_raw, grid_4_raw]]),\n]\n\nTEST_CASE_16 = [\n \"3D_open_full_connectivity_default_applied_labels\",\n {\"connectivity\": 3},\n torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),\n torch.tensor([[grid_4_raw, grid_2_raw, grid_4_raw]]),\n]\n\nTEST_CASE_17 = [\n \"3D_open_to_edge_connectivity_1_default_applied_labels\",\n {\"connectivity\": 1},\n torch.tensor([[grid_1_raw, grid_1_raw, grid_3_raw]]),\n torch.tensor([[grid_1_raw, grid_1_raw, grid_3_raw]]),\n]\n\nTEST_CASE_18 = [\n \"enclosed_full_connectivity_applied_labels_with_background\",\n {\"connectivity\": 2, \"applied_labels\": [0, 1]},\n grid_1,\n grid_3,\n]\n\nTEST_CASE_19 = [\n \"enclosed_full_connectivity_applied_labels_only_background\",\n {\"connectivity\": 2, \"applied_labels\": [0]},\n grid_1,\n grid_1,\n]\n\nTEST_CASE_20 = [\n \"one-hot_enclosed_connectivity_1_default_applied_labels\",\n {\"connectivity\": 1},\n torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),\n torch.tensor([grid_1_raw, grid_3_raw, grid_4_raw]),\n]\n\nTEST_CASE_21 = [\n \"one-hot_enclosed_connectivity_1_applied_labels_2\",\n {\"connectivity\": 1, \"applied_labels\": [2]},\n torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),\n torch.tensor([grid_1_raw, grid_1_raw, grid_4_raw]),\n]\n\nTEST_CASE_22 = [\n \"one-hot_full_connectivity_applied_labels_2\",\n {\"connectivity\": 2},\n torch.tensor([grid_1_raw, grid_1_raw, grid_2_raw]),\n torch.tensor([grid_1_raw, grid_3_raw, grid_2_raw]),\n]\n\nVALID_CASES = [\n TEST_CASE_0,\n TEST_CASE_1,\n TEST_CASE_2,\n TEST_CASE_3,\n TEST_CASE_4,\n TEST_CASE_5,\n TEST_CASE_6,\n TEST_CASE_7,\n TEST_CASE_8,\n TEST_CASE_9,\n TEST_CASE_10,\n TEST_CASE_11,\n TEST_CASE_12,\n TEST_CASE_13,\n TEST_CASE_14,\n TEST_CASE_15,\n TEST_CASE_16,\n TEST_CASE_17,\n TEST_CASE_18,\n TEST_CASE_19,\n TEST_CASE_20,\n TEST_CASE_21,\n TEST_CASE_22,\n]\n\nITEST_CASE_1 = [\"invalid_image_data_type\", {}, [[[[1, 1, 1]]]], NotImplementedError]\n\nINVALID_CASES = [ITEST_CASE_1]\n\n\nclass TestFillHoles(unittest.TestCase):\n @parameterized.expand(VALID_CASES)\n def test_correct_results(self, _, args, input_image, expected):\n key = CommonKeys.IMAGE\n converter = FillHolesd(keys=key, **args)\n for p in TEST_NDARRAYS:\n result = converter({key: p(clone(input_image))})[key]\n assert_allclose(result, p(expected))\n\n @parameterized.expand(INVALID_CASES)\n def test_raise_exception(self, _, args, input_image, expected_error):\n key = CommonKeys.IMAGE\n with self.assertRaises(expected_error):\n converter = FillHolesd(keys=key, **args)\n if isinstance(input_image, torch.Tensor) and torch.cuda.is_available():\n _ = converter({key: clone(input_image).cuda()})[key]\n else:\n _ = converter({key: clone(input_image)})[key]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.data import CacheDataset, DataLoader, Dataset\nfrom monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd\nfrom monai.utils import set_determinism\n\nTEST_CASE_1 = [[{\"image\": np.asarray([1, 2, 3])}, {\"image\": np.asarray([4, 5])}]]\n\nTEST_CASE_2 = [[{\"label\": torch.as_tensor([[3], [2]])}, {\"label\": np.asarray([[1], [2]])}]]\n\n\nclass TestDataLoader(unittest.TestCase):\n def test_values(self):\n datalist = [\n {\"image\": \"spleen_19.nii.gz\", \"label\": \"spleen_label_19.nii.gz\"},\n {\"image\": \"spleen_31.nii.gz\", \"label\": \"spleen_label_31.nii.gz\"},\n ]\n transform = Compose(\n [\n DataStatsd(keys=[\"image\", \"label\"], data_shape=False, value_range=False, data_value=True),\n SimulateDelayd(keys=[\"image\", \"label\"], delay_time=0.1),\n ]\n )\n dataset = CacheDataset(data=datalist, transform=transform, cache_rate=0.5, cache_num=1)\n n_workers = 0 if sys.platform == \"win32\" else 2\n dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=n_workers)\n for d in dataloader:\n self.assertEqual(d[\"image\"][0], \"spleen_19.nii.gz\")\n self.assertEqual(d[\"image\"][1], \"spleen_31.nii.gz\")\n self.assertEqual(d[\"label\"][0], \"spleen_label_19.nii.gz\")\n self.assertEqual(d[\"label\"][1], \"spleen_label_31.nii.gz\")\n\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_exception(self, datalist):\n dataset = Dataset(data=datalist, transform=None)\n dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)\n with self.assertRaisesRegex((TypeError, RuntimeError), \"Collate error on the key\"):\n for _ in dataloader:\n pass\n\n\nclass _RandomDataset(torch.utils.data.Dataset, Randomizable):\n def __getitem__(self, index):\n return self.R.randint(0, 1000, (1,))\n\n def __len__(self):\n return 8\n\n\nclass TestLoaderRandom(unittest.TestCase):\n \"\"\"\n Testing data loader working with the randomizable interface\n \"\"\"\n\n def setUp(self):\n set_determinism(0)\n\n def tearDown(self):\n set_determinism(None)\n\n def test_randomize(self):\n dataset = _RandomDataset()\n dataloader = DataLoader(dataset, batch_size=2, num_workers=3)\n output = []\n for _ in range(2):\n for batch in dataloader:\n output.extend(batch.data.numpy().flatten().tolist())\n self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.data.image_reader import ITKReader, NibabelReader, NrrdReader, PILReader\nfrom monai.data.image_writer import ITKWriter, NibabelWriter, PILWriter, register_writer, resolve_writer\nfrom monai.transforms import LoadImage, SaveImage, moveaxis\nfrom monai.utils import OptionalImportError\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\n\nclass TestLoadSaveNifti(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir, ignore_errors=True)\n\n def nifti_rw(self, test_data, reader, writer, dtype, resample=True):\n test_data = test_data.astype(dtype)\n ndim = len(test_data.shape) - 1\n for p in TEST_NDARRAYS:\n output_ext = \".nii.gz\"\n filepath = f\"testfile_{ndim}d\"\n saver = SaveImage(\n output_dir=self.test_dir, output_ext=output_ext, resample=resample, separate_folder=False, writer=writer\n )\n saver(\n p(test_data),\n {\n \"filename_or_obj\": f\"{filepath}.png\",\n \"affine\": np.eye(4),\n \"original_affine\": np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]),\n },\n )\n saved_path = os.path.join(self.test_dir, filepath + \"_trans\" + output_ext)\n self.assertTrue(os.path.exists(saved_path))\n loader = LoadImage(reader=reader, squeeze_non_spatial_dims=True)\n data, meta = loader(saved_path)\n if meta[\"original_channel_dim\"] == -1:\n _test_data = moveaxis(test_data, 0, -1)\n else:\n _test_data = test_data[0]\n if resample:\n _test_data = moveaxis(_test_data, 0, 1)\n assert_allclose(data, _test_data)\n\n @parameterized.expand(itertools.product([NibabelReader, ITKReader], [NibabelWriter, \"ITKWriter\"]))\n def test_2d(self, reader, writer):\n test_data = np.arange(48, dtype=np.uint8).reshape(1, 6, 8)\n self.nifti_rw(test_data, reader, writer, np.uint8)\n self.nifti_rw(test_data, reader, writer, np.float32)\n\n @parameterized.expand(itertools.product([NibabelReader, ITKReader], [NibabelWriter, ITKWriter]))\n def test_3d(self, reader, writer):\n test_data = np.arange(48, dtype=np.uint8).reshape(1, 2, 3, 8)\n self.nifti_rw(test_data, reader, writer, int)\n self.nifti_rw(test_data, reader, writer, int, False)\n\n @parameterized.expand(itertools.product([NibabelReader, ITKReader], [\"NibabelWriter\", ITKWriter]))\n def test_4d(self, reader, writer):\n test_data = np.arange(48, dtype=np.uint8).reshape(2, 1, 3, 8)\n self.nifti_rw(test_data, reader, writer, np.float16)\n\n\nclass TestLoadSavePNG(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir, ignore_errors=True)\n\n def png_rw(self, test_data, reader, writer, dtype, resample=True):\n test_data = test_data.astype(dtype)\n ndim = len(test_data.shape) - 1\n for p in TEST_NDARRAYS:\n output_ext = \".png\"\n filepath = f\"testfile_{ndim}d\"\n saver 
= SaveImage(\n output_dir=self.test_dir, output_ext=output_ext, resample=resample, separate_folder=False, writer=writer\n )\n saver(p(test_data), {\"filename_or_obj\": f\"{filepath}.png\", \"spatial_shape\": (6, 8)})\n saved_path = os.path.join(self.test_dir, filepath + \"_trans\" + output_ext)\n self.assertTrue(os.path.exists(saved_path))\n loader = LoadImage(reader=reader)\n data, meta = loader(saved_path)\n if meta[\"original_channel_dim\"] == -1:\n _test_data = moveaxis(test_data, 0, -1)\n else:\n _test_data = test_data[0]\n assert_allclose(data, _test_data)\n\n @parameterized.expand(itertools.product([PILReader, ITKReader], [PILWriter, ITKWriter]))\n def test_2d(self, reader, writer):\n test_data = np.arange(48, dtype=np.uint8).reshape(1, 6, 8)\n self.png_rw(test_data, reader, writer, np.uint8)\n\n @parameterized.expand(itertools.product([PILReader, ITKReader], [\"monai.data.PILWriter\", ITKWriter]))\n def test_rgb(self, reader, writer):\n test_data = np.arange(48, dtype=np.uint8).reshape(3, 2, 8)\n self.png_rw(test_data, reader, writer, np.uint8, False)\n\n\nclass TestRegRes(unittest.TestCase):\n def test_0_default(self):\n self.assertTrue(len(resolve_writer(\".png\")) > 0, \"has png writer\")\n self.assertTrue(len(resolve_writer(\".nrrd\")) > 0, \"has nrrd writer\")\n self.assertTrue(len(resolve_writer(\"unknown\")) > 0, \"has writer\")\n register_writer(\"unknown1\", lambda: (_ for _ in ()).throw(OptionalImportError))\n with self.assertRaises(OptionalImportError):\n resolve_writer(\"unknown1\")\n\n def test_1_new(self):\n register_writer(\"new\", lambda x: x + 1)\n register_writer(\"new2\", lambda x: x + 1)\n self.assertEqual(resolve_writer(\"new\")[0](0), 1)\n\n\nclass TestLoadSaveNrrd(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir, ignore_errors=True)\n\n def nrrd_rw(self, test_data, reader, writer, dtype, resample=True):\n test_data = test_data.astype(dtype)\n ndim = len(test_data.shape)\n for p in TEST_NDARRAYS:\n output_ext = \".nrrd\"\n filepath = f\"testfile_{ndim}d\"\n saver = SaveImage(\n output_dir=self.test_dir, output_ext=output_ext, resample=resample, separate_folder=False, writer=writer\n )\n saver(p(test_data), {\"filename_or_obj\": f\"{filepath}{output_ext}\", \"spatial_shape\": test_data.shape})\n saved_path = os.path.join(self.test_dir, filepath + \"_trans\" + output_ext)\n loader = LoadImage(reader=reader)\n data, meta = loader(saved_path)\n assert_allclose(data, test_data)\n\n @parameterized.expand(itertools.product([NrrdReader, ITKReader], [ITKWriter, ITKWriter]))\n def test_2d(self, reader, writer):\n test_data = np.random.randn(8, 8).astype(np.float32)\n self.nrrd_rw(test_data, reader, writer, np.float32)\n\n @parameterized.expand(itertools.product([NrrdReader, ITKReader], [ITKWriter, ITKWriter]))\n def test_3d(self, reader, writer):\n test_data = np.random.randn(8, 8, 8).astype(np.float32)\n self.nrrd_rw(test_data, reader, writer, np.float32)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import VoteEnsemble\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTESTS = []\nfor p in TEST_NDARRAYS:\n # shape: [2, 1, 1]\n TESTS.append(\n [\n {\"num_classes\": None},\n [p(torch.tensor([[[1]], [[0]]])), p(torch.tensor([[[1]], [[0]]])), p(torch.tensor([[[0]], [[1]]]))],\n p(torch.tensor([[[1.0]], [[0.0]]])),\n ]\n )\n\n # shape: [1, 2, 1, 1]\n TESTS.append(\n [\n {\"num_classes\": None},\n p(\n torch.stack(\n [torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[1]], [[0]]]]), torch.tensor([[[[0]], [[1]]]])]\n )\n ),\n p(torch.tensor([[[[1.0]], [[0.0]]]])),\n ]\n )\n\n # shape: [1, 2, 1]\n TESTS.append(\n [\n {\"num_classes\": 3},\n [p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[1], [1]]]))],\n p(torch.tensor([[[0], [2]]])),\n ]\n )\n\n # shape: [1, 2, 1]\n TESTS.append(\n [\n {\"num_classes\": 5},\n [p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[0], [2]]])), p(torch.tensor([[[1], [1]]]))],\n p(torch.tensor([[[0], [2]]])),\n ]\n )\n\n # shape: [1]\n TESTS.append(\n [{\"num_classes\": 3}, [p(torch.tensor([2])), p(torch.tensor([2])), p(torch.tensor([1]))], p(torch.tensor([2]))]\n )\n\n # shape: 1\n TESTS.append([{\"num_classes\": 3}, [p(torch.tensor(2)), p(torch.tensor(2)), p(torch.tensor(1))], p(torch.tensor(2))])\n\n\nclass TestVoteEnsemble(unittest.TestCase):\n @parameterized.expand(TESTS)\n def test_value(self, input_param, img, expected_value):\n result = VoteEnsemble(**input_param)(img)\n if isinstance(img, torch.Tensor):\n self.assertIsInstance(result, torch.Tensor)\n self.assertEqual(result.device, img.device)\n assert_allclose(result, expected_value)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, Iterable, Sequence\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom monai.config import IgniteInfo, KeysCollection\nfrom monai.engines.utils import IterationEvents, default_metric_cmp_fn, default_prepare_batch\nfrom monai.engines.workflow import Workflow\nfrom monai.inferers import Inferer, SimpleInferer\nfrom monai.networks.utils import eval_mode, train_mode\nfrom monai.transforms import Transform\nfrom monai.utils import ForwardMode, ensure_tuple, min_version, optional_import\nfrom monai.utils.enums import CommonKeys as Keys\nfrom monai.utils.module import look_up_option\n\nif TYPE_CHECKING:\n from ignite.engine import Engine, EventEnum\n from ignite.metrics import Metric\nelse:\n Engine, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\")\n Metric, _ = optional_import(\"ignite.metrics\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Metric\")\n EventEnum, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"EventEnum\")\n\n__all__ = [\"Evaluator\", \"SupervisedEvaluator\", \"EnsembleEvaluator\"]\n\n\nclass Evaluator(Workflow):\n \"\"\"\n Base class for all kinds of evaluators, inherits from Workflow.\n\n Args:\n device: an object representing the device on which to run.\n val_data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader.\n epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function to parse expected data (usually `image`, `label` and other network args)\n from `engine.state.batch` for every iteration, for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.\n iteration_update: the callable function for every iteration, expect to accept `engine`\n and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.\n if not provided, use `self._iteration()` instead. for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.\n postprocessing: execute additional transformation for the model output data.\n Typically, several Tensor based transforms composed by `Compose`.\n key_val_metric: compute metric when every iteration completed, and save average value to\n engine.state.metrics when epoch completed. 
key_val_metric is the main metric to compare and save the\n checkpoint into files.\n additional_metrics: more Ignite metrics that also attach to Ignite Engine.\n metric_cmp_fn: function to compare current key metric with previous best key metric value,\n it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update\n `best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.\n val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:\n CheckpointHandler, StatsHandler, etc.\n amp: whether to enable auto-mixed-precision evaluation, default is False.\n mode: model forward mode during evaluation, should be 'eval' or 'train',\n which maps to `model.eval()` or `model.train()`, default to 'eval'.\n event_names: additional custom ignite events that will register to the engine.\n new events can be a list of str or `ignite.engine.events.EventEnum`.\n event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.\n for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html\n #ignite.engine.engine.Engine.register_events.\n decollate: whether to decollate the batch-first data to a list of data after model computation,\n recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.\n default to `True`.\n to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for\n `device`, `non_blocking`.\n amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:\n https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.\n\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n val_data_loader: Iterable | DataLoader,\n epoch_length: int | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = default_prepare_batch,\n iteration_update: Callable[[Engine, Any], Any] | None = None,\n postprocessing: Transform | None = None,\n key_val_metric: dict[str, Metric] | None = None,\n additional_metrics: dict[str, Metric] | None = None,\n metric_cmp_fn: Callable = default_metric_cmp_fn,\n val_handlers: Sequence | None = None,\n amp: bool = False,\n mode: ForwardMode | str = ForwardMode.EVAL,\n event_names: list[str | EventEnum] | None = None,\n event_to_attr: dict | None = None,\n decollate: bool = True,\n to_kwargs: dict | None = None,\n amp_kwargs: dict | None = None,\n ) -> None:\n super().__init__(\n device=device,\n max_epochs=1,\n data_loader=val_data_loader,\n epoch_length=epoch_length,\n non_blocking=non_blocking,\n prepare_batch=prepare_batch,\n iteration_update=iteration_update,\n postprocessing=postprocessing,\n key_metric=key_val_metric,\n additional_metrics=additional_metrics,\n metric_cmp_fn=metric_cmp_fn,\n handlers=val_handlers,\n amp=amp,\n event_names=event_names,\n event_to_attr=event_to_attr,\n decollate=decollate,\n to_kwargs=to_kwargs,\n amp_kwargs=amp_kwargs,\n )\n mode = look_up_option(mode, ForwardMode)\n if mode == ForwardMode.EVAL:\n self.mode = eval_mode\n elif mode == ForwardMode.TRAIN:\n self.mode = train_mode\n else:\n raise ValueError(f\"unsupported mode: {mode}, should be 'eval' or 'train'.\")\n\n def run(self, global_epoch: int = 1) -> None:\n \"\"\"\n Execute validation/evaluation based on Ignite Engine.\n\n Args:\n global_epoch: the overall epoch if during a training. 
evaluator engine can get it from trainer.\n\n \"\"\"\n # init env value for current validation process\n self.state.max_epochs = global_epoch\n self.state.epoch = global_epoch - 1\n self.state.iteration = 0\n super().run()\n\n def get_validation_stats(self) -> dict[str, float]:\n return {\"best_validation_metric\": self.state.best_metric, \"best_validation_epoch\": self.state.best_metric_epoch}\n\n\nclass SupervisedEvaluator(Evaluator):\n \"\"\"\n Standard supervised evaluation method with image and label(optional), inherits from evaluator and Workflow.\n\n Args:\n device: an object representing the device on which to run.\n val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader.\n network: network to evaluate in the evaluator, should be regular PyTorch `torch.nn.Module`.\n epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function to parse expected data (usually `image`, `label` and other network args)\n from `engine.state.batch` for every iteration, for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.\n iteration_update: the callable function for every iteration, expect to accept `engine`\n and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.\n if not provided, use `self._iteration()` instead. for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.\n inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.\n postprocessing: execute additional transformation for the model output data.\n Typically, several Tensor based transforms composed by `Compose`.\n key_val_metric: compute metric when every iteration completed, and save average value to\n engine.state.metrics when epoch completed. 
key_val_metric is the main metric to compare and save the\n checkpoint into files.\n additional_metrics: more Ignite metrics that also attach to Ignite Engine.\n metric_cmp_fn: function to compare current key metric with previous best key metric value,\n it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update\n `best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.\n val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:\n CheckpointHandler, StatsHandler, etc.\n amp: whether to enable auto-mixed-precision evaluation, default is False.\n mode: model forward mode during evaluation, should be 'eval' or 'train',\n which maps to `model.eval()` or `model.train()`, default to 'eval'.\n event_names: additional custom ignite events that will register to the engine.\n new events can be a list of str or `ignite.engine.events.EventEnum`.\n event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.\n for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html\n #ignite.engine.engine.Engine.register_events.\n decollate: whether to decollate the batch-first data to a list of data after model computation,\n recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.\n default to `True`.\n to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for\n `device`, `non_blocking`.\n amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:\n https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.\n\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n val_data_loader: Iterable | DataLoader,\n network: torch.nn.Module,\n epoch_length: int | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = default_prepare_batch,\n iteration_update: Callable[[Engine, Any], Any] | None = None,\n inferer: Inferer | None = None,\n postprocessing: Transform | None = None,\n key_val_metric: dict[str, Metric] | None = None,\n additional_metrics: dict[str, Metric] | None = None,\n metric_cmp_fn: Callable = default_metric_cmp_fn,\n val_handlers: Sequence | None = None,\n amp: bool = False,\n mode: ForwardMode | str = ForwardMode.EVAL,\n event_names: list[str | EventEnum] | None = None,\n event_to_attr: dict | None = None,\n decollate: bool = True,\n to_kwargs: dict | None = None,\n amp_kwargs: dict | None = None,\n ) -> None:\n super().__init__(\n device=device,\n val_data_loader=val_data_loader,\n epoch_length=epoch_length,\n non_blocking=non_blocking,\n prepare_batch=prepare_batch,\n iteration_update=iteration_update,\n postprocessing=postprocessing,\n key_val_metric=key_val_metric,\n additional_metrics=additional_metrics,\n metric_cmp_fn=metric_cmp_fn,\n val_handlers=val_handlers,\n amp=amp,\n mode=mode,\n event_names=event_names,\n event_to_attr=event_to_attr,\n decollate=decollate,\n to_kwargs=to_kwargs,\n amp_kwargs=amp_kwargs,\n )\n\n self.network = network\n self.inferer = SimpleInferer() if inferer is None else inferer\n\n def _iteration(self, engine: SupervisedEvaluator, batchdata: dict[str, torch.Tensor]):\n \"\"\"\n callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.\n Return below items in a dictionary:\n - IMAGE: image Tensor data for model input, already moved to device.\n - LABEL: label Tensor data corresponding to the image, already moved to 
device.\n - PRED: prediction result of model.\n\n Args:\n engine: `SupervisedEvaluator` to execute operation for an iteration.\n batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.\n\n Raises:\n ValueError: When ``batchdata`` is None.\n\n \"\"\"\n if batchdata is None:\n raise ValueError(\"Must provide batch data for current iteration.\")\n batch = engine.prepare_batch(batchdata, engine.state.device, engine.non_blocking, **engine.to_kwargs)\n if len(batch) == 2:\n inputs, targets = batch\n args: tuple = ()\n kwargs: dict = {}\n else:\n inputs, targets, args, kwargs = batch\n\n # put iteration outputs into engine.state\n engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}\n\n # execute forward computation\n with engine.mode(engine.network):\n\n if engine.amp:\n with torch.cuda.amp.autocast(**engine.amp_kwargs):\n engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs)\n else:\n engine.state.output[Keys.PRED] = engine.inferer(inputs, engine.network, *args, **kwargs)\n engine.fire_event(IterationEvents.FORWARD_COMPLETED)\n engine.fire_event(IterationEvents.MODEL_COMPLETED)\n\n return engine.state.output\n\n\nclass EnsembleEvaluator(Evaluator):\n \"\"\"\n Ensemble evaluation for multiple models, inherits from evaluator and Workflow.\n It accepts a list of models for inference and outputs a list of predictions for further operations.\n\n Args:\n device: an object representing the device on which to run.\n val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader.\n epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.\n networks: networks to evaluate in order in the evaluator, should be regular PyTorch `torch.nn.Module`.\n pred_keys: the keys to store every prediction data.\n the length must exactly match the number of networks.\n if None, use \"pred_{index}\" as key corresponding to N networks, index from `0` to `N-1`.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function to parse expected data (usually `image`, `label` and other network args)\n from `engine.state.batch` for every iteration, for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.create_supervised_trainer.html.\n iteration_update: the callable function for every iteration, expect to accept `engine`\n and `engine.state.batch` as inputs, return data will be stored in `engine.state.output`.\n if not provided, use `self._iteration()` instead. for more details please refer to:\n https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html.\n inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.\n postprocessing: execute additional transformation for the model output data.\n Typically, several Tensor based transforms composed by `Compose`.\n key_val_metric: compute metric when every iteration completed, and save average value to\n engine.state.metrics when epoch completed. 
key_val_metric is the main metric to compare and save the\n checkpoint into files.\n additional_metrics: more Ignite metrics that also attach to Ignite Engine.\n metric_cmp_fn: function to compare current key metric with previous best key metric value,\n it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update\n `best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.\n val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:\n CheckpointHandler, StatsHandler, etc.\n amp: whether to enable auto-mixed-precision evaluation, default is False.\n mode: model forward mode during evaluation, should be 'eval' or 'train',\n which maps to `model.eval()` or `model.train()`, default to 'eval'.\n event_names: additional custom ignite events that will register to the engine.\n new events can be a list of str or `ignite.engine.events.EventEnum`.\n event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.\n for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html\n #ignite.engine.engine.Engine.register_events.\n decollate: whether to decollate the batch-first data to a list of data after model computation,\n recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.\n default to `True`.\n to_kwargs: dict of other args for `prepare_batch` API when converting the input data, except for\n `device`, `non_blocking`.\n amp_kwargs: dict of the args for `torch.cuda.amp.autocast()` API, for more details:\n https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast.\n\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n val_data_loader: Iterable | DataLoader,\n networks: Sequence[torch.nn.Module],\n pred_keys: KeysCollection | None = None,\n epoch_length: int | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = default_prepare_batch,\n iteration_update: Callable[[Engine, Any], Any] | None = None,\n inferer: Inferer | None = None,\n postprocessing: Transform | None = None,\n key_val_metric: dict[str, Metric] | None = None,\n additional_metrics: dict[str, Metric] | None = None,\n metric_cmp_fn: Callable = default_metric_cmp_fn,\n val_handlers: Sequence | None = None,\n amp: bool = False,\n mode: ForwardMode | str = ForwardMode.EVAL,\n event_names: list[str | EventEnum] | None = None,\n event_to_attr: dict | None = None,\n decollate: bool = True,\n to_kwargs: dict | None = None,\n amp_kwargs: dict | None = None,\n ) -> None:\n super().__init__(\n device=device,\n val_data_loader=val_data_loader,\n epoch_length=epoch_length,\n non_blocking=non_blocking,\n prepare_batch=prepare_batch,\n iteration_update=iteration_update,\n postprocessing=postprocessing,\n key_val_metric=key_val_metric,\n additional_metrics=additional_metrics,\n metric_cmp_fn=metric_cmp_fn,\n val_handlers=val_handlers,\n amp=amp,\n mode=mode,\n event_names=event_names,\n event_to_attr=event_to_attr,\n decollate=decollate,\n to_kwargs=to_kwargs,\n amp_kwargs=amp_kwargs,\n )\n\n self.networks = ensure_tuple(networks)\n self.pred_keys = (\n [f\"{Keys.PRED}_{i}\" for i in range(len(self.networks))] if pred_keys is None else ensure_tuple(pred_keys)\n )\n if len(self.pred_keys) != len(self.networks):\n raise ValueError(\"length of `pred_keys` must be same as the length of `networks`.\")\n self.inferer = SimpleInferer() if inferer is None else inferer\n\n def _iteration(self, engine: 
EnsembleEvaluator, batchdata: dict[str, torch.Tensor]):\n        \"\"\"\n        callback function for the Ensemble Evaluation processing logic of 1 iteration in Ignite Engine.\n        Return below items in a dictionary:\n            - IMAGE: image Tensor data for model input, already moved to device.\n            - LABEL: label Tensor data corresponding to the image, already moved to device.\n            - pred_keys[0]: prediction result of network 0.\n            - pred_keys[1]: prediction result of network 1.\n            - ... ...\n            - pred_keys[N]: prediction result of network N.\n\n        Args:\n            engine: `EnsembleEvaluator` to execute operation for an iteration.\n            batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.\n\n        Raises:\n            ValueError: When ``batchdata`` is None.\n\n        \"\"\"\n        if batchdata is None:\n            raise ValueError(\"Must provide batch data for current iteration.\")\n        batch = engine.prepare_batch(batchdata, engine.state.device, engine.non_blocking, **engine.to_kwargs)\n        if len(batch) == 2:\n            inputs, targets = batch\n            args: tuple = ()\n            kwargs: dict = {}\n        else:\n            inputs, targets, args, kwargs = batch\n\n        # put iteration outputs into engine.state\n        engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets}\n\n        for idx, network in enumerate(engine.networks):\n            with engine.mode(network):\n                if engine.amp:\n                    with torch.cuda.amp.autocast(**engine.amp_kwargs):\n                        if isinstance(engine.state.output, dict):\n                            engine.state.output.update(\n                                {engine.pred_keys[idx]: engine.inferer(inputs, network, *args, **kwargs)}\n                            )\n                else:\n                    if isinstance(engine.state.output, dict):\n                        engine.state.output.update(\n                            {engine.pred_keys[idx]: engine.inferer(inputs, network, *args, **kwargs)}\n                        )\n            engine.fire_event(IterationEvents.FORWARD_COMPLETED)\n            engine.fire_event(IterationEvents.MODEL_COMPLETED)\n\n        return engine.state.output\n",
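A minimal sketch of driving the EnsembleEvaluator defined above end to end; the one-batch list standing in for a DataLoader and the AddOne network are illustrative placeholders:

import torch
from monai.engines import EnsembleEvaluator

class AddOne(torch.nn.Module):
    def forward(self, x):
        return x + 1.0

# a one-batch "loader"; default_prepare_batch reads the image/label keys
loader = [{"image": torch.tensor([[1.0, 2.0]]), "label": torch.tensor([[2.0, 3.0]])}]

evaluator = EnsembleEvaluator(
    device=torch.device("cpu"),
    val_data_loader=loader,
    epoch_length=1,
    networks=[AddOne(), AddOne()],
    pred_keys=["pred_0", "pred_1"],  # length must match `networks`
    decollate=False,
)
evaluator.run()
# engine.state.output now holds IMAGE, LABEL and one prediction per pred_key
print(evaluator.state.output["pred_0"])  # tensor([[2., 3.]])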
"# Copyright 2020 - 2022 -> (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Sequence, Tuple, Type, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nfrom torch.nn import LayerNorm\n\nfrom monai.networks.blocks import MLPBlock as Mlp\nfrom monai.networks.blocks import PatchEmbed, UnetOutBlock, UnetrBasicBlock, UnetrUpBlock\nfrom monai.networks.layers import DropPath, trunc_normal_\nfrom monai.utils import ensure_tuple_rep, optional_import\n\nrearrange, _ = optional_import(\"einops\", name=\"rearrange\")\n\n\nclass SwinUNETR(nn.Module):\n \"\"\"\n Swin UNETR based on: \"Hatamizadeh et al.,\n Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images\n <https://arxiv.org/abs/2201.01266>\"\n \"\"\"\n\n def __init__(\n self,\n img_size: Union[Sequence[int], int],\n in_channels: int,\n out_channels: int,\n depths: Sequence[int] = (2, 2, 2, 2),\n num_heads: Sequence[int] = (3, 6, 12, 24),\n feature_size: int = 24,\n norm_name: Union[Tuple, str] = \"instance\",\n drop_rate: float = 0.0,\n attn_drop_rate: float = 0.0,\n dropout_path_rate: float = 0.0,\n normalize: bool = True,\n use_checkpoint: bool = False,\n spatial_dims: int = 3,\n ) -> None:\n \"\"\"\n Args:\n img_size: dimension of input image.\n in_channels: dimension of input channels.\n out_channels: dimension of output channels.\n feature_size: dimension of network feature size.\n depths: number of layers in each stage.\n num_heads: number of attention heads.\n norm_name: feature normalization type and arguments.\n drop_rate: dropout rate.\n attn_drop_rate: attention dropout rate.\n dropout_path_rate: drop path rate.\n normalize: normalize output intermediate features in each stage.\n use_checkpoint: use gradient checkpointing for reduced memory usage.\n spatial_dims: number of spatial dims.\n\n Examples::\n\n # for 3D single channel input with size (96,96,96), 4-channel output and feature size of 48.\n >>> net = SwinUNETR(img_size=(96,96,96), in_channels=1, out_channels=4, feature_size=48)\n\n # for 3D 4-channel input with size (128,128,128), 3-channel output and (2,4,2,2) layers in each stage.\n >>> net = SwinUNETR(img_size=(128,128,128), in_channels=4, out_channels=3, depths=(2,4,2,2))\n\n # for 2D single channel input with size (96,96), 2-channel output and gradient checkpointing.\n >>> net = SwinUNETR(img_size=(96,96), in_channels=3, out_channels=2, use_checkpoint=True, spatial_dims=2)\n\n \"\"\"\n\n super().__init__()\n\n img_size = ensure_tuple_rep(img_size, spatial_dims)\n patch_size = ensure_tuple_rep(2, spatial_dims)\n window_size = ensure_tuple_rep(7, spatial_dims)\n\n if not (spatial_dims == 2 or spatial_dims == 3):\n raise ValueError(\"spatial dimension should be 2 or 3.\")\n\n for m, p in zip(img_size, patch_size):\n for i in range(5):\n if m % np.power(p, i + 1) != 0:\n raise ValueError(\"input image size (img_size) should be divisible by stage-wise image 
resolution.\")\n\n if not (0 <= drop_rate <= 1):\n raise ValueError(\"dropout rate should be between 0 and 1.\")\n\n if not (0 <= attn_drop_rate <= 1):\n raise ValueError(\"attention dropout rate should be between 0 and 1.\")\n\n if not (0 <= dropout_path_rate <= 1):\n raise ValueError(\"drop path rate should be between 0 and 1.\")\n\n if feature_size % 12 != 0:\n raise ValueError(\"feature_size should be divisible by 12.\")\n\n self.normalize = normalize\n\n self.swinViT = SwinTransformer(\n in_chans=in_channels,\n embed_dim=feature_size,\n window_size=window_size,\n patch_size=patch_size,\n depths=depths,\n num_heads=num_heads,\n mlp_ratio=4.0,\n qkv_bias=True,\n drop_rate=drop_rate,\n attn_drop_rate=attn_drop_rate,\n drop_path_rate=dropout_path_rate,\n norm_layer=nn.LayerNorm,\n use_checkpoint=use_checkpoint,\n spatial_dims=spatial_dims,\n )\n\n self.encoder1 = UnetrBasicBlock(\n spatial_dims=spatial_dims,\n in_channels=in_channels,\n out_channels=feature_size,\n kernel_size=3,\n stride=1,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.encoder2 = UnetrBasicBlock(\n spatial_dims=spatial_dims,\n in_channels=feature_size,\n out_channels=feature_size,\n kernel_size=3,\n stride=1,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.encoder3 = UnetrBasicBlock(\n spatial_dims=spatial_dims,\n in_channels=2 * feature_size,\n out_channels=2 * feature_size,\n kernel_size=3,\n stride=1,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.encoder4 = UnetrBasicBlock(\n spatial_dims=spatial_dims,\n in_channels=4 * feature_size,\n out_channels=4 * feature_size,\n kernel_size=3,\n stride=1,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.encoder10 = UnetrBasicBlock(\n spatial_dims=spatial_dims,\n in_channels=16 * feature_size,\n out_channels=16 * feature_size,\n kernel_size=3,\n stride=1,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.decoder5 = UnetrUpBlock(\n spatial_dims=spatial_dims,\n in_channels=16 * feature_size,\n out_channels=8 * feature_size,\n kernel_size=3,\n upsample_kernel_size=2,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.decoder4 = UnetrUpBlock(\n spatial_dims=spatial_dims,\n in_channels=feature_size * 8,\n out_channels=feature_size * 4,\n kernel_size=3,\n upsample_kernel_size=2,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.decoder3 = UnetrUpBlock(\n spatial_dims=spatial_dims,\n in_channels=feature_size * 4,\n out_channels=feature_size * 2,\n kernel_size=3,\n upsample_kernel_size=2,\n norm_name=norm_name,\n res_block=True,\n )\n self.decoder2 = UnetrUpBlock(\n spatial_dims=spatial_dims,\n in_channels=feature_size * 2,\n out_channels=feature_size,\n kernel_size=3,\n upsample_kernel_size=2,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.decoder1 = UnetrUpBlock(\n spatial_dims=spatial_dims,\n in_channels=feature_size,\n out_channels=feature_size,\n kernel_size=3,\n upsample_kernel_size=2,\n norm_name=norm_name,\n res_block=True,\n )\n\n self.out = UnetOutBlock(\n spatial_dims=spatial_dims, in_channels=feature_size, out_channels=out_channels\n ) # type: ignore\n\n def load_from(self, weights):\n\n with torch.no_grad():\n self.swinViT.patch_embed.proj.weight.copy_(weights[\"state_dict\"][\"module.patch_embed.proj.weight\"])\n self.swinViT.patch_embed.proj.bias.copy_(weights[\"state_dict\"][\"module.patch_embed.proj.bias\"])\n for bname, block in self.swinViT.layers1[0].blocks.named_children():\n block.load_from(weights, n_block=bname, layer=\"layers1\")\n self.swinViT.layers1[0].downsample.reduction.weight.copy_(\n 
weights[\"state_dict\"][\"module.layers1.0.downsample.reduction.weight\"]\n )\n self.swinViT.layers1[0].downsample.norm.weight.copy_(\n weights[\"state_dict\"][\"module.layers1.0.downsample.norm.weight\"]\n )\n self.swinViT.layers1[0].downsample.norm.bias.copy_(\n weights[\"state_dict\"][\"module.layers1.0.downsample.norm.bias\"]\n )\n for bname, block in self.swinViT.layers2[0].blocks.named_children():\n block.load_from(weights, n_block=bname, layer=\"layers2\")\n self.swinViT.layers2[0].downsample.reduction.weight.copy_(\n weights[\"state_dict\"][\"module.layers2.0.downsample.reduction.weight\"]\n )\n self.swinViT.layers2[0].downsample.norm.weight.copy_(\n weights[\"state_dict\"][\"module.layers2.0.downsample.norm.weight\"]\n )\n self.swinViT.layers2[0].downsample.norm.bias.copy_(\n weights[\"state_dict\"][\"module.layers2.0.downsample.norm.bias\"]\n )\n for bname, block in self.swinViT.layers3[0].blocks.named_children():\n block.load_from(weights, n_block=bname, layer=\"layers3\")\n self.swinViT.layers3[0].downsample.reduction.weight.copy_(\n weights[\"state_dict\"][\"module.layers3.0.downsample.reduction.weight\"]\n )\n self.swinViT.layers3[0].downsample.norm.weight.copy_(\n weights[\"state_dict\"][\"module.layers3.0.downsample.norm.weight\"]\n )\n self.swinViT.layers3[0].downsample.norm.bias.copy_(\n weights[\"state_dict\"][\"module.layers3.0.downsample.norm.bias\"]\n )\n for bname, block in self.swinViT.layers4[0].blocks.named_children():\n block.load_from(weights, n_block=bname, layer=\"layers4\")\n self.swinViT.layers4[0].downsample.reduction.weight.copy_(\n weights[\"state_dict\"][\"module.layers4.0.downsample.reduction.weight\"]\n )\n self.swinViT.layers4[0].downsample.norm.weight.copy_(\n weights[\"state_dict\"][\"module.layers4.0.downsample.norm.weight\"]\n )\n self.swinViT.layers4[0].downsample.norm.bias.copy_(\n weights[\"state_dict\"][\"module.layers4.0.downsample.norm.bias\"]\n )\n\n def forward(self, x_in):\n hidden_states_out = self.swinViT(x_in, self.normalize)\n enc0 = self.encoder1(x_in)\n enc1 = self.encoder2(hidden_states_out[0])\n enc2 = self.encoder3(hidden_states_out[1])\n enc3 = self.encoder4(hidden_states_out[2])\n dec4 = self.encoder10(hidden_states_out[4])\n dec3 = self.decoder5(dec4, hidden_states_out[3])\n dec2 = self.decoder4(dec3, enc3)\n dec1 = self.decoder3(dec2, enc2)\n dec0 = self.decoder2(dec1, enc1)\n out = self.decoder1(dec0, enc0)\n logits = self.out(out)\n return logits\n\n\ndef window_partition(x, window_size):\n \"\"\"window partition operation based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/abs/2103.14030>\"\n https://github.com/microsoft/Swin-Transformer\n\n Args:\n x: input tensor.\n window_size: local window size.\n \"\"\"\n x_shape = x.size()\n if len(x_shape) == 5:\n b, d, h, w, c = x_shape\n x = x.view(\n b,\n d // window_size[0],\n window_size[0],\n h // window_size[1],\n window_size[1],\n w // window_size[2],\n window_size[2],\n c,\n )\n windows = (\n x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size[0] * window_size[1] * window_size[2], c)\n )\n elif len(x_shape) == 4:\n b, h, w, c = x.shape\n x = x.view(b, h // window_size[0], window_size[0], w // window_size[1], window_size[1], c)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0] * window_size[1], c)\n return windows\n\n\ndef window_reverse(windows, window_size, dims):\n \"\"\"window reverse operation based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision 
Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n\n    Args:\n        windows: windows tensor.\n        window_size: local window size.\n        dims: dimension values.\n    \"\"\"\n    if len(dims) == 4:\n        b, d, h, w = dims\n        x = windows.view(\n            b,\n            d // window_size[0],\n            h // window_size[1],\n            w // window_size[2],\n            window_size[0],\n            window_size[1],\n            window_size[2],\n            -1,\n        )\n        x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(b, d, h, w, -1)\n\n    elif len(dims) == 3:\n        b, h, w = dims\n        # the window count along w uses window_size[1] (matters for non-square windows)\n        x = windows.view(b, h // window_size[0], w // window_size[1], window_size[0], window_size[1], -1)\n        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)\n    return x\n\n\ndef get_window_size(x_size, window_size, shift_size=None):\n    \"\"\"Computing window size based on: \"Liu et al.,\n    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n\n    Args:\n        x_size: input size.\n        window_size: local window size.\n        shift_size: window shifting size.\n    \"\"\"\n\n    use_window_size = list(window_size)\n    if shift_size is not None:\n        use_shift_size = list(shift_size)\n    for i in range(len(x_size)):\n        if x_size[i] <= window_size[i]:\n            use_window_size[i] = x_size[i]\n            if shift_size is not None:\n                use_shift_size[i] = 0\n\n    if shift_size is None:\n        return tuple(use_window_size)\n    else:\n        return tuple(use_window_size), tuple(use_shift_size)\n\n\nclass WindowAttention(nn.Module):\n    \"\"\"\n    Window based multi-head self attention module with relative position bias based on: \"Liu et al.,\n    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n    \"\"\"\n\n    def __init__(\n        self,\n        dim: int,\n        num_heads: int,\n        window_size: Sequence[int],\n        qkv_bias: bool = False,\n        attn_drop: float = 0.0,\n        proj_drop: float = 0.0,\n    ) -> None:\n        \"\"\"\n        Args:\n            dim: number of feature channels.\n            num_heads: number of attention heads.\n            window_size: local window size.\n            qkv_bias: add a learnable bias to query, key, value.\n            attn_drop: attention dropout rate.\n            proj_drop: dropout rate of output.\n        \"\"\"\n\n        super().__init__()\n        self.dim = dim\n        self.window_size = window_size\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = head_dim**-0.5\n        mesh_args = torch.meshgrid.__kwdefaults__\n\n        if len(self.window_size) == 3:\n            self.relative_position_bias_table = nn.Parameter(\n                torch.zeros(\n                    (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1),\n                    num_heads,\n                )\n            )\n            coords_d = torch.arange(self.window_size[0])\n            coords_h = torch.arange(self.window_size[1])\n            coords_w = torch.arange(self.window_size[2])\n            if mesh_args is not None:\n                coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing=\"ij\"))\n            else:\n                coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w))\n            coords_flatten = torch.flatten(coords, 1)\n            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]\n            relative_coords = relative_coords.permute(1, 2, 0).contiguous()\n            relative_coords[:, :, 0] += self.window_size[0] - 1\n            relative_coords[:, :, 1] += self.window_size[1] - 1\n            relative_coords[:, :, 2] += self.window_size[2] - 1\n            relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)\n            relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1\n        elif len(self.window_size) == 2:\n
self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)\n )\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n if mesh_args is not None:\n coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing=\"ij\"))\n else:\n coords = torch.stack(torch.meshgrid(coords_h, coords_w))\n coords_flatten = torch.flatten(coords, 1)\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]\n relative_coords = relative_coords.permute(1, 2, 0).contiguous()\n relative_coords[:, :, 0] += self.window_size[0] - 1\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n\n relative_position_index = relative_coords.sum(-1)\n self.register_buffer(\"relative_position_index\", relative_position_index)\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n trunc_normal_(self.relative_position_bias_table, std=0.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask):\n b, n, c = x.shape\n qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2]\n q = q * self.scale\n attn = q @ k.transpose(-2, -1)\n relative_position_bias = self.relative_position_bias_table[\n self.relative_position_index[:n, :n].reshape(-1)\n ].reshape(n, n, -1)\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()\n attn = attn + relative_position_bias.unsqueeze(0)\n if mask is not None:\n nw = mask.shape[0]\n attn = attn.view(b // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, n, n)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n x = (attn @ v).transpose(1, 2).reshape(b, n, c)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass SwinTransformerBlock(nn.Module):\n \"\"\"\n Swin Transformer block based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/abs/2103.14030>\"\n https://github.com/microsoft/Swin-Transformer\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_heads: int,\n window_size: Sequence[int],\n shift_size: Sequence[int],\n mlp_ratio: float = 4.0,\n qkv_bias: bool = True,\n drop: float = 0.0,\n attn_drop: float = 0.0,\n drop_path: float = 0.0,\n act_layer: str = \"GELU\",\n norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore\n use_checkpoint: bool = False,\n ) -> None:\n \"\"\"\n Args:\n dim: number of feature channels.\n num_heads: number of attention heads.\n window_size: local window size.\n shift_size: window shift size.\n mlp_ratio: ratio of mlp hidden dim to embedding dim.\n qkv_bias: add a learnable bias to query, key, value.\n drop: dropout rate.\n attn_drop: attention dropout rate.\n drop_path: stochastic depth rate.\n act_layer: activation layer.\n norm_layer: normalization layer.\n use_checkpoint: use gradient checkpointing for reduced memory usage.\n \"\"\"\n\n super().__init__()\n self.dim = dim\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n self.use_checkpoint = use_checkpoint\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim,\n window_size=self.window_size,\n num_heads=num_heads,\n qkv_bias=qkv_bias,\n attn_drop=attn_drop,\n 
proj_drop=drop,\n )\n\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(hidden_size=dim, mlp_dim=mlp_hidden_dim, act=act_layer, dropout_rate=drop, dropout_mode=\"swin\")\n\n def forward_part1(self, x, mask_matrix):\n x_shape = x.size()\n x = self.norm1(x)\n if len(x_shape) == 5:\n b, d, h, w, c = x.shape\n window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)\n pad_l = pad_t = pad_d0 = 0\n pad_d1 = (window_size[0] - d % window_size[0]) % window_size[0]\n pad_b = (window_size[1] - h % window_size[1]) % window_size[1]\n pad_r = (window_size[2] - w % window_size[2]) % window_size[2]\n x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))\n _, dp, hp, wp, _ = x.shape\n dims = [b, dp, hp, wp]\n\n elif len(x_shape) == 4:\n b, h, w, c = x.shape\n window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)\n pad_l = pad_t = 0\n pad_r = (window_size[0] - h % window_size[0]) % window_size[0]\n pad_b = (window_size[1] - w % window_size[1]) % window_size[1]\n x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n _, hp, wp, _ = x.shape\n dims = [b, hp, wp]\n\n if any(i > 0 for i in shift_size):\n if len(x_shape) == 5:\n shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))\n elif len(x_shape) == 4:\n shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2))\n attn_mask = mask_matrix\n else:\n shifted_x = x\n attn_mask = None\n x_windows = window_partition(shifted_x, window_size)\n attn_windows = self.attn(x_windows, mask=attn_mask)\n attn_windows = attn_windows.view(-1, *(window_size + (c,)))\n shifted_x = window_reverse(attn_windows, window_size, dims)\n if any(i > 0 for i in shift_size):\n if len(x_shape) == 5:\n x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))\n elif len(x_shape) == 4:\n x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2))\n else:\n x = shifted_x\n\n if len(x_shape) == 5:\n if pad_d1 > 0 or pad_r > 0 or pad_b > 0:\n x = x[:, :d, :h, :w, :].contiguous()\n elif len(x_shape) == 4:\n if pad_r > 0 or pad_b > 0:\n x = x[:, :h, :w, :].contiguous()\n\n return x\n\n def forward_part2(self, x):\n return self.drop_path(self.mlp(self.norm2(x)))\n\n def load_from(self, weights, n_block, layer):\n root = f\"module.{layer}.0.blocks.{n_block}.\"\n block_names = [\n \"norm1.weight\",\n \"norm1.bias\",\n \"attn.relative_position_bias_table\",\n \"attn.relative_position_index\",\n \"attn.qkv.weight\",\n \"attn.qkv.bias\",\n \"attn.proj.weight\",\n \"attn.proj.bias\",\n \"norm2.weight\",\n \"norm2.bias\",\n \"mlp.fc1.weight\",\n \"mlp.fc1.bias\",\n \"mlp.fc2.weight\",\n \"mlp.fc2.bias\",\n ]\n with torch.no_grad():\n self.norm1.weight.copy_(weights[\"state_dict\"][root + block_names[0]])\n self.norm1.bias.copy_(weights[\"state_dict\"][root + block_names[1]])\n self.attn.relative_position_bias_table.copy_(weights[\"state_dict\"][root + block_names[2]])\n self.attn.relative_position_index.copy_(weights[\"state_dict\"][root + block_names[3]])\n self.attn.qkv.weight.copy_(weights[\"state_dict\"][root + block_names[4]])\n self.attn.qkv.bias.copy_(weights[\"state_dict\"][root + block_names[5]])\n self.attn.proj.weight.copy_(weights[\"state_dict\"][root + block_names[6]])\n self.attn.proj.bias.copy_(weights[\"state_dict\"][root + block_names[7]])\n 
self.norm2.weight.copy_(weights[\"state_dict\"][root + block_names[8]])\n            self.norm2.bias.copy_(weights[\"state_dict\"][root + block_names[9]])\n            self.mlp.linear1.weight.copy_(weights[\"state_dict\"][root + block_names[10]])\n            self.mlp.linear1.bias.copy_(weights[\"state_dict\"][root + block_names[11]])\n            self.mlp.linear2.weight.copy_(weights[\"state_dict\"][root + block_names[12]])\n            self.mlp.linear2.bias.copy_(weights[\"state_dict\"][root + block_names[13]])\n\n    def forward(self, x, mask_matrix):\n        shortcut = x\n        if self.use_checkpoint:\n            x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)\n        else:\n            x = self.forward_part1(x, mask_matrix)\n        x = shortcut + self.drop_path(x)\n        if self.use_checkpoint:\n            x = x + checkpoint.checkpoint(self.forward_part2, x)\n        else:\n            x = x + self.forward_part2(x)\n        return x\n\n\nclass PatchMerging(nn.Module):\n    \"\"\"\n    Patch merging layer based on: \"Liu et al.,\n    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n    \"\"\"\n\n    def __init__(\n        self, dim: int, norm_layer: Type[LayerNorm] = nn.LayerNorm, spatial_dims: int = 3\n    ) -> None:  # type: ignore\n        \"\"\"\n        Args:\n            dim: number of feature channels.\n            norm_layer: normalization layer.\n            spatial_dims: number of spatial dims.\n        \"\"\"\n\n        super().__init__()\n        self.dim = dim\n        if spatial_dims == 3:\n            self.reduction = nn.Linear(8 * dim, 2 * dim, bias=False)\n            self.norm = norm_layer(8 * dim)\n        elif spatial_dims == 2:\n            self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n            self.norm = norm_layer(4 * dim)\n\n    def forward(self, x):\n\n        x_shape = x.size()\n        if len(x_shape) == 5:\n            b, d, h, w, c = x_shape\n            pad_input = (h % 2 == 1) or (w % 2 == 1) or (d % 2 == 1)\n            if pad_input:\n                x = F.pad(x, (0, 0, 0, d % 2, 0, w % 2, 0, h % 2))\n            # gather each of the eight spatial octants exactly once (x5/x6 previously duplicated x2/x3)\n            x0 = x[:, 0::2, 0::2, 0::2, :]\n            x1 = x[:, 1::2, 0::2, 0::2, :]\n            x2 = x[:, 0::2, 1::2, 0::2, :]\n            x3 = x[:, 0::2, 0::2, 1::2, :]\n            x4 = x[:, 1::2, 0::2, 1::2, :]\n            x5 = x[:, 1::2, 1::2, 0::2, :]\n            x6 = x[:, 0::2, 1::2, 1::2, :]\n            x7 = x[:, 1::2, 1::2, 1::2, :]\n            x = torch.cat([x0, x1, x2, x3, x4, x5, x6, x7], -1)\n\n        elif len(x_shape) == 4:\n            b, h, w, c = x_shape\n            pad_input = (h % 2 == 1) or (w % 2 == 1)\n            if pad_input:\n                x = F.pad(x, (0, 0, 0, w % 2, 0, h % 2))\n            x0 = x[:, 0::2, 0::2, :]\n            x1 = x[:, 1::2, 0::2, :]\n            x2 = x[:, 0::2, 1::2, :]\n            x3 = x[:, 1::2, 1::2, :]\n            x = torch.cat([x0, x1, x2, x3], -1)\n\n        x = self.norm(x)\n        x = self.reduction(x)\n        return x\n\n\ndef compute_mask(dims, window_size, shift_size, device):\n    \"\"\"Computing region masks based on: \"Liu et al.,\n    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n\n    Args:\n        dims: dimension values.\n        window_size: local window size.\n        shift_size: shift size.\n        device: device.\n    \"\"\"\n\n    cnt = 0\n\n    if len(dims) == 3:\n        d, h, w = dims\n        img_mask = torch.zeros((1, d, h, w, 1), device=device)\n        for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):\n            for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):\n                for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None):\n                    img_mask[:, d, h, w, :] = cnt\n                    cnt += 1\n\n    elif len(dims) == 2:\n        h, w = dims\n        img_mask = torch.zeros((1, h, w, 1), device=device)\n        for h in slice(-window_size[0],
-shift_size[0]), slice(-shift_size[0], None):\n            for w in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):\n                img_mask[:, h, w, :] = cnt\n                cnt += 1\n\n    mask_windows = window_partition(img_mask, window_size)\n    mask_windows = mask_windows.squeeze(-1)\n    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n\n    return attn_mask\n\n\nclass BasicLayer(nn.Module):\n    \"\"\"\n    Basic Swin Transformer layer in one stage based on: \"Liu et al.,\n    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n    <https://arxiv.org/abs/2103.14030>\"\n    https://github.com/microsoft/Swin-Transformer\n    \"\"\"\n\n    def __init__(\n        self,\n        dim: int,\n        depth: int,\n        num_heads: int,\n        window_size: Sequence[int],\n        drop_path: list,\n        mlp_ratio: float = 4.0,\n        qkv_bias: bool = False,\n        drop: float = 0.0,\n        attn_drop: float = 0.0,\n        norm_layer: Type[LayerNorm] = nn.LayerNorm,  # type: ignore\n        downsample: nn.Module = None,  # type: ignore\n        use_checkpoint: bool = False,\n    ) -> None:\n        \"\"\"\n        Args:\n            dim: number of feature channels.\n            depth: number of layers in this stage.\n            num_heads: number of attention heads.\n            window_size: local window size.\n            drop_path: stochastic depth rate.\n            mlp_ratio: ratio of mlp hidden dim to embedding dim.\n            qkv_bias: add a learnable bias to query, key, value.\n            drop: dropout rate.\n            attn_drop: attention dropout rate.\n            norm_layer: normalization layer.\n            downsample: downsample layer at the end of the layer.\n            use_checkpoint: use gradient checkpointing for reduced memory usage.\n        \"\"\"\n\n        super().__init__()\n        self.window_size = window_size\n        self.shift_size = tuple(i // 2 for i in window_size)\n        self.no_shift = tuple(0 for i in window_size)\n        self.depth = depth\n        self.use_checkpoint = use_checkpoint\n        self.blocks = nn.ModuleList(\n            [\n                SwinTransformerBlock(\n                    dim=dim,\n                    num_heads=num_heads,\n                    window_size=self.window_size,\n                    shift_size=self.no_shift if (i % 2 == 0) else self.shift_size,\n                    mlp_ratio=mlp_ratio,\n                    qkv_bias=qkv_bias,\n                    drop=drop,\n                    attn_drop=attn_drop,\n                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n                    norm_layer=norm_layer,\n                    use_checkpoint=use_checkpoint,\n                )\n                for i in range(depth)\n            ]\n        )\n        self.downsample = downsample\n        if self.downsample is not None:\n            self.downsample = downsample(dim=dim, norm_layer=norm_layer, spatial_dims=len(self.window_size))\n\n    def forward(self, x):\n        x_shape = x.size()\n        if len(x_shape) == 5:\n            b, c, d, h, w = x_shape\n            window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)\n            x = rearrange(x, \"b c d h w -> b d h w c\")\n            dp = int(np.ceil(d / window_size[0])) * window_size[0]\n            hp = int(np.ceil(h / window_size[1])) * window_size[1]\n            wp = int(np.ceil(w / window_size[2])) * window_size[2]\n            attn_mask = compute_mask([dp, hp, wp], window_size, shift_size, x.device)\n            for blk in self.blocks:\n                x = blk(x, attn_mask)\n            x = x.view(b, d, h, w, -1)\n            if self.downsample is not None:\n                x = self.downsample(x)\n            x = rearrange(x, \"b d h w c -> b c d h w\")\n\n        elif len(x_shape) == 4:\n            b, c, h, w = x_shape\n            window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)\n            x = rearrange(x, \"b c h w -> b h w c\")\n            hp = int(np.ceil(h / window_size[0])) * window_size[0]\n            wp = int(np.ceil(w / window_size[1])) * window_size[1]\n            attn_mask = compute_mask([hp, wp], window_size, shift_size, x.device)\n            for blk in
self.blocks:\n x = blk(x, attn_mask)\n x = x.view(b, h, w, -1)\n if self.downsample is not None:\n x = self.downsample(x)\n x = rearrange(x, \"b h w c -> b c h w\")\n return x\n\n\nclass SwinTransformer(nn.Module):\n \"\"\"\n Swin Transformer based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/abs/2103.14030>\"\n https://github.com/microsoft/Swin-Transformer\n \"\"\"\n\n def __init__(\n self,\n in_chans: int,\n embed_dim: int,\n window_size: Sequence[int],\n patch_size: Sequence[int],\n depths: Sequence[int],\n num_heads: Sequence[int],\n mlp_ratio: float = 4.0,\n qkv_bias: bool = True,\n drop_rate: float = 0.0,\n attn_drop_rate: float = 0.0,\n drop_path_rate: float = 0.0,\n norm_layer: Type[LayerNorm] = nn.LayerNorm, # type: ignore\n patch_norm: bool = False,\n use_checkpoint: bool = False,\n spatial_dims: int = 3,\n ) -> None:\n \"\"\"\n Args:\n in_chans: dimension of input channels.\n embed_dim: number of linear projection output channels.\n window_size: local window size.\n patch_size: patch size.\n depths: number of layers in each stage.\n num_heads: number of attention heads.\n mlp_ratio: ratio of mlp hidden dim to embedding dim.\n qkv_bias: add a learnable bias to query, key, value.\n drop_rate: dropout rate.\n attn_drop_rate: attention dropout rate.\n drop_path_rate: stochastic depth rate.\n norm_layer: normalization layer.\n patch_norm: add normalization after patch embedding.\n use_checkpoint: use gradient checkpointing for reduced memory usage.\n spatial_dims: spatial dimension.\n \"\"\"\n\n super().__init__()\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.patch_norm = patch_norm\n self.window_size = window_size\n self.patch_size = patch_size\n self.patch_embed = PatchEmbed(\n patch_size=self.patch_size,\n in_chans=in_chans,\n embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None, # type: ignore\n spatial_dims=spatial_dims,\n )\n self.pos_drop = nn.Dropout(p=drop_rate)\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]\n self.layers1 = nn.ModuleList()\n self.layers2 = nn.ModuleList()\n self.layers3 = nn.ModuleList()\n self.layers4 = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(\n dim=int(embed_dim * 2**i_layer),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=self.window_size,\n drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n norm_layer=norm_layer,\n downsample=PatchMerging,\n use_checkpoint=use_checkpoint,\n )\n if i_layer == 0:\n self.layers1.append(layer)\n elif i_layer == 1:\n self.layers2.append(layer)\n elif i_layer == 2:\n self.layers3.append(layer)\n elif i_layer == 3:\n self.layers4.append(layer)\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n\n def proj_out(self, x, normalize=False):\n if normalize:\n x_shape = x.size()\n if len(x_shape) == 5:\n n, ch, d, h, w = x_shape\n x = rearrange(x, \"n c d h w -> n d h w c\")\n x = F.layer_norm(x, [ch])\n x = rearrange(x, \"n d h w c -> n c d h w\")\n elif len(x_shape) == 4:\n n, ch, h, w = x_shape\n x = rearrange(x, \"n c h w -> n h w c\")\n x = F.layer_norm(x, [ch])\n x = rearrange(x, \"n h w c -> n c h w\")\n return x\n\n def forward(self, x, normalize=True):\n x0 = self.patch_embed(x)\n x0 = self.pos_drop(x0)\n x0_out = self.proj_out(x0, normalize)\n x1 = self.layers1[0](x0.contiguous())\n x1_out = 
self.proj_out(x1, normalize)\n x2 = self.layers2[0](x1.contiguous())\n x2_out = self.proj_out(x2, normalize)\n x3 = self.layers3[0](x2.contiguous())\n x3_out = self.proj_out(x3, normalize)\n x4 = self.layers4[0](x3.contiguous())\n x4_out = self.proj_out(x4, normalize)\n return [x0_out, x1_out, x2_out, x3_out, x4_out]\n",
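The window_partition/window_reverse pair defined in this file should be mutually inverse on inputs whose spatial sizes are multiples of the window size; a quick sanity check, assuming the file is importable as monai.networks.nets.swin_unetr:

import torch
from monai.networks.nets.swin_unetr import window_partition, window_reverse

b, h, w, c = 2, 14, 14, 8      # channel-last layout, as used inside the Swin blocks
window_size = (7, 7)
x = torch.randn(b, h, w, c)

windows = window_partition(x, window_size)             # (b * num_windows, 7 * 7, c)
restored = window_reverse(windows, window_size, (b, h, w))
assert torch.equal(restored, x)                        # lossless round trip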
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.engines import PrepareBatchExtraInput, SupervisedEvaluator\nfrom tests.utils import assert_allclose\n\nTEST_CASE_0 = [\n {\"extra_keys\": \"extra1\"},\n {\"x\": torch.tensor([1, 2]), \"t1\": torch.tensor([5, 6]), \"t2\": None, \"t3\": None},\n]\n\nTEST_CASE_1 = [\n {\"extra_keys\": [\"extra1\", \"extra3\"]},\n {\"x\": torch.tensor([1, 2]), \"t1\": torch.tensor([5, 6]), \"t2\": \"test\", \"t3\": None},\n]\n\nTEST_CASE_2 = [\n {\"extra_keys\": {\"t1\": \"extra2\", \"t2\": \"extra3\", \"t3\": \"extra1\"}},\n {\"x\": torch.tensor([1, 2]), \"t1\": 16, \"t2\": \"test\", \"t3\": torch.tensor([5, 6])},\n]\n\n\nclass TestNet(torch.nn.Module):\n def forward(self, x: torch.Tensor, t1=None, t2=None, t3=None):\n return {\"x\": x, \"t1\": t1, \"t2\": t2, \"t3\": t3}\n\n\nclass TestPrepareBatchExtraInput(unittest.TestCase):\n @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])\n def test_content(self, input_args, expected_value):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n dataloader = [\n {\n \"image\": torch.tensor([1, 2]),\n \"label\": torch.tensor([3, 4]),\n \"extra1\": torch.tensor([5, 6]),\n \"extra2\": 16,\n \"extra3\": \"test\",\n }\n ]\n # set up engine\n evaluator = SupervisedEvaluator(\n device=device,\n val_data_loader=dataloader,\n epoch_length=1,\n network=TestNet(),\n non_blocking=True,\n prepare_batch=PrepareBatchExtraInput(**input_args),\n decollate=False,\n )\n evaluator.run()\n output = evaluator.state.output\n assert_allclose(output[\"image\"], torch.tensor([1, 2], device=device))\n assert_allclose(output[\"label\"], torch.tensor([3, 4], device=device))\n for k, v in output[\"pred\"].items():\n if isinstance(v, torch.Tensor):\n assert_allclose(v, expected_value[k].to(device))\n else:\n self.assertEqual(v, expected_value[k])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nimport unittest\nfrom unittest import skipUnless\n\nimport torch\nfrom ignite.engine import Engine\nfrom parameterized import parameterized\n\nfrom monai.config import IgniteInfo\nfrom monai.data import Dataset\nfrom monai.handlers import GarbageCollector\nfrom monai.utils import min_version, optional_import\n\nEvents, has_ignite = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Events\")\n\n\nTEST_CASE_0 = [[0, 1, 2], \"epoch\"]\n\nTEST_CASE_1 = [[0, 1, 2], \"iteration\"]\n\nTEST_CASE_2 = [[0, 1, 2], Events.EPOCH_COMPLETED]\n\n\nclass TestHandlerGarbageCollector(unittest.TestCase):\n @skipUnless(has_ignite, \"Requires ignite\")\n @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2])\n def test_content(self, data, trigger_event):\n # set up engine\n gb_count_dict = {}\n\n def _train_func(engine, batch):\n # store garbage collection counts\n if trigger_event == Events.EPOCH_COMPLETED or trigger_event.lower() == \"epoch\":\n if engine.state.iteration % engine.state.epoch_length == 1:\n gb_count_dict[engine.state.epoch] = gc.get_count()\n elif trigger_event.lower() == \"iteration\":\n gb_count_dict[engine.state.iteration] = gc.get_count()\n\n engine = Engine(_train_func)\n\n # set up testing handler\n dataset = Dataset(data, transform=None)\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=1)\n GarbageCollector(trigger_event=trigger_event, log_level=30).attach(engine)\n\n engine.run(data_loader, max_epochs=5)\n\n first_count = 0\n for iter, gb_count in gb_count_dict.items():\n # At least one zero-generation object is collected\n # self.assertGreaterEqual(gb_count[0], 0)\n if iter > 1:\n # Since we are collecting all objects from all generations manually at each call,\n # starting from the second call, there shouldn't be any 1st and 2nd\n # generation objects available to collect.\n self.assertEqual(gb_count[1], first_count)\n self.assertEqual(gb_count[2], first_count)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# =========================================================================\n# Adapted from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py\n# which has the following license...\n# https://github.com/pytorch/vision/blob/main/LICENSE\n#\n# BSD 3-Clause License\n\n# Copyright (c) Soumith Chintala 2016,\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nThis script is modified from torchvision to support N-D images,\n\nhttps://github.com/pytorch/vision/blob/main/torchvision/models/detection/_utils.py\n\"\"\"\n\nimport math\nfrom typing import Sequence, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom monai.data.box_utils import COMPUTE_DTYPE, CenterSizeMode, StandardMode, convert_box_mode, is_valid_box_values\nfrom monai.utils.module import look_up_option\n\n\ndef encode_boxes(gt_boxes: Tensor, proposals: Tensor, weights: Tensor) -> Tensor:\n \"\"\"\n Encode a set of proposals with respect to some reference ground truth (gt) boxes.\n\n Args:\n gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``\n proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. 
The box mode is assumed to be ``StandardMode``\n weights: the weights for ``(cx, cy, w, h) or (cx,cy,cz, w,h,d)``\n\n Return:\n encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.\n \"\"\"\n\n if gt_boxes.shape[0] != proposals.shape[0]:\n raise ValueError(\"gt_boxes.shape[0] should be equal to proposals.shape[0].\")\n spatial_dims = look_up_option(len(weights), [4, 6]) // 2\n\n if not is_valid_box_values(gt_boxes):\n raise ValueError(\"gt_boxes is not valid. Please check if it contains empty boxes.\")\n if not is_valid_box_values(proposals):\n raise ValueError(\"proposals is not valid. Please check if it contains empty boxes.\")\n\n # implementation starts here\n ex_cccwhd: Tensor = convert_box_mode(proposals, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore\n gt_cccwhd: Tensor = convert_box_mode(gt_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode) # type: ignore\n targets_dxyz = (\n weights[:spatial_dims].unsqueeze(0)\n * (gt_cccwhd[:, :spatial_dims] - ex_cccwhd[:, :spatial_dims])\n / ex_cccwhd[:, spatial_dims:]\n )\n targets_dwhd = weights[spatial_dims:].unsqueeze(0) * torch.log(\n gt_cccwhd[:, spatial_dims:] / ex_cccwhd[:, spatial_dims:]\n )\n\n targets = torch.cat((targets_dxyz, targets_dwhd), dim=1)\n # torch.log may cause NaN or Inf\n if torch.isnan(targets).any() or torch.isinf(targets).any():\n raise ValueError(\"targets is NaN or Inf.\")\n return targets\n\n\nclass BoxCoder:\n \"\"\"\n This class encodes and decodes a set of bounding boxes into\n the representation used for training the regressors.\n\n Args:\n weights: 4-element tuple or 6-element tuple\n boxes_xform_clip: high threshold to prevent sending too large values into torch.exp()\n\n Example:\n .. code-block:: python\n\n box_coder = BoxCoder(weights=[1., 1., 1., 1., 1., 1.])\n gt_boxes = torch.tensor([[1,2,1,4,5,6],[1,3,2,7,8,9]])\n proposals = gt_boxes + torch.rand(gt_boxes.shape)\n rel_gt_boxes = box_coder.encode_single(gt_boxes, proposals)\n gt_back = box_coder.decode_single(rel_gt_boxes, proposals)\n # We expect gt_back to be equal to gt_boxes\n \"\"\"\n\n def __init__(self, weights: Tuple[float], boxes_xform_clip: Union[float, None] = None) -> None:\n if boxes_xform_clip is None:\n boxes_xform_clip = math.log(1000.0 / 16)\n self.spatial_dims = look_up_option(len(weights), [4, 6]) // 2\n self.weights = weights\n self.boxes_xform_clip = boxes_xform_clip\n\n def encode(self, gt_boxes: Sequence[Tensor], proposals: Sequence[Tensor]) -> Tuple[Tensor]:\n \"\"\"\n Encode a set of proposals with respect to some ground truth (gt) boxes.\n\n Args:\n gt_boxes: list of gt boxes, Nx4 or Nx6 torch tensor. 
The box mode is assumed to be ``StandardMode``\n            proposals: list of boxes to be encoded, each element is Mx4 or Mx6 torch tensor.\n                The box mode is assumed to be ``StandardMode``\n\n        Return:\n            A tuple of encoded gt, target of box regression that is used to\n            convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.\n        \"\"\"\n        boxes_per_image = [len(b) for b in gt_boxes]\n        # concat the lists to do computation\n        concat_gt_boxes = torch.cat(tuple(gt_boxes), dim=0)\n        concat_proposals = torch.cat(tuple(proposals), dim=0)\n        concat_targets = self.encode_single(concat_gt_boxes, concat_proposals)\n        # split to tuple\n        targets: Tuple[Tensor] = concat_targets.split(boxes_per_image, 0)\n        return targets\n\n    def encode_single(self, gt_boxes: Tensor, proposals: Tensor) -> Tensor:\n        \"\"\"\n        Encode proposals with respect to ground truth (gt) boxes.\n\n        Args:\n            gt_boxes: gt boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``\n            proposals: boxes to be encoded, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``\n\n        Return:\n            encoded gt, target of box regression that is used to convert proposals into gt_boxes, Nx4 or Nx6 torch tensor.\n        \"\"\"\n        dtype = gt_boxes.dtype\n        device = gt_boxes.device\n        weights = torch.as_tensor(self.weights, dtype=dtype, device=device)\n        targets = encode_boxes(gt_boxes, proposals, weights)\n        return targets\n\n    def decode(self, rel_codes: Tensor, reference_boxes: Sequence[Tensor]) -> Tensor:\n        \"\"\"\n        From a set of original reference_boxes and encoded relative box offsets, get the decoded boxes.\n\n        Args:\n            rel_codes: encoded boxes, Nx4 or Nx6 torch tensor.\n            reference_boxes: a list of reference boxes, each element is Mx4 or Mx6 torch tensor.\n                The box mode is assumed to be ``StandardMode``\n\n        Return:\n            decoded boxes, Nx1x4 or Nx1x6 torch tensor. The box mode will be ``StandardMode``\n        \"\"\"\n        if not isinstance(reference_boxes, Sequence) or (not isinstance(rel_codes, torch.Tensor)):\n            raise ValueError(\"Input arguments wrong type.\")\n        boxes_per_image = [b.size(0) for b in reference_boxes]\n        # concat the lists to do computation\n        concat_boxes = torch.cat(tuple(reference_boxes), dim=0)\n        box_sum = 0\n        for val in boxes_per_image:\n            box_sum += val\n        if box_sum > 0:\n            rel_codes = rel_codes.reshape(box_sum, -1)\n        pred_boxes = self.decode_single(rel_codes, concat_boxes)\n        if box_sum > 0:\n            pred_boxes = pred_boxes.reshape(box_sum, -1, 2 * self.spatial_dims)\n        return pred_boxes\n\n    def decode_single(self, rel_codes: Tensor, reference_boxes: Tensor) -> Tensor:\n        \"\"\"\n        From a set of original boxes and encoded relative box offsets, get the decoded boxes.\n\n        Args:\n            rel_codes: encoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor.\n            reference_boxes: reference boxes, Nx4 or Nx6 torch tensor. The box mode is assumed to be ``StandardMode``\n\n        Return:\n            decoded boxes, Nx(4*num_box_reg) or Nx(6*num_box_reg) torch tensor. 
The box mode will be ``StandardMode``\n        \"\"\"\n        reference_boxes = reference_boxes.to(rel_codes.dtype)\n        offset = reference_boxes.shape[-1]\n\n        pred_boxes = []\n        boxes_cccwhd = convert_box_mode(reference_boxes, src_mode=StandardMode, dst_mode=CenterSizeMode)\n        for axis in range(self.spatial_dims):\n            whd_axis = boxes_cccwhd[:, axis + self.spatial_dims]\n            ctr_xyz_axis = boxes_cccwhd[:, axis]\n            dxyz_axis = rel_codes[:, axis::offset] / self.weights[axis]\n            dwhd_axis = rel_codes[:, self.spatial_dims + axis :: offset] / self.weights[axis + self.spatial_dims]\n            # Prevent sending too large values into torch.exp()\n            dwhd_axis = torch.clamp(dwhd_axis.to(COMPUTE_DTYPE), max=self.boxes_xform_clip)\n\n            pred_ctr_xyz_axis = dxyz_axis * whd_axis[:, None] + ctr_xyz_axis[:, None]\n            pred_whd_axis = torch.exp(dwhd_axis) * whd_axis[:, None]\n            pred_whd_axis = pred_whd_axis.to(dxyz_axis.dtype)\n\n            # When converting float32 to float16, Inf or NaN may occur\n            if torch.isnan(pred_whd_axis).any() or torch.isinf(pred_whd_axis).any():\n                raise ValueError(\"pred_whd_axis is NaN or Inf.\")\n\n            # Distance from center to box's corner.\n            c_to_c_whd_axis = (\n                torch.tensor(0.5, dtype=pred_ctr_xyz_axis.dtype, device=pred_whd_axis.device) * pred_whd_axis\n            )\n\n            pred_boxes.append(pred_ctr_xyz_axis - c_to_c_whd_axis)\n            pred_boxes.append(pred_ctr_xyz_axis + c_to_c_whd_axis)\n\n        pred_boxes = pred_boxes[::2] + pred_boxes[1::2]\n        pred_boxes_final = torch.stack(pred_boxes, dim=2).flatten(1)\n        return pred_boxes_final\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\nfrom unittest.case import skipUnless\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.data import NrrdReader\nfrom monai.utils.module import optional_import\n\nnrrd, has_nrrd = optional_import(\"nrrd\", allow_namespace_pkg=True)\n\nTEST_CASE_1 = [(4, 4), \"test_image.nrrd\", (4, 4), np.uint8]\nTEST_CASE_2 = [(4, 4, 4), \"test_image.nrrd\", (4, 4, 4), np.uint16]\nTEST_CASE_3 = [(4, 4, 4, 4), \"test_image.nrrd\", (4, 4, 4, 4), np.uint32]\nTEST_CASE_4 = [(1, 2, 3, 4, 5), \"test_image.nrrd\", (1, 2, 3, 4, 5), np.uint64]\nTEST_CASE_5 = [(6, 5, 4, 3, 2, 1), \"test_image.nrrd\", (6, 5, 4, 3, 2, 1), np.float32]\nTEST_CASE_6 = [(4,), \"test_image.nrrd\", (4,), np.float64]\nTEST_CASE_7 = [(4, 4), [\"test_image.nrrd\", \"test_image2.nrrd\", \"test_image3.nrrd\"], (4, 4), np.float32]\nTEST_CASE_8 = [\n (3, 4, 4, 1),\n \"test_image.nrrd\",\n (3, 4, 4, 1),\n np.float32,\n {\n \"dimension\": 4,\n \"space\": \"left-posterior-superior\",\n \"sizes\": [3, 4, 4, 1],\n \"space directions\": [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],\n \"space origin\": [0.0, 0.0, 0.0],\n },\n]\n\n\n@skipUnless(has_nrrd, \"nrrd required\")\nclass TestNrrdReader(unittest.TestCase):\n def test_verify_suffix(self):\n reader = NrrdReader()\n self.assertFalse(reader.verify_suffix(\"test_image.nrd\"))\n reader.verify_suffix(\"test_image.nrrd\")\n reader.verify_suffix(\"test_image.seg.nrrd\")\n\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])\n def test_read_int(self, data_shape, filename, expected_shape, dtype):\n min_val, max_val = np.iinfo(dtype).min, np.iinfo(dtype).max\n test_image = np.random.randint(min_val, max_val, size=data_shape, dtype=dtype)\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, filename)\n nrrd.write(filename, test_image.astype(dtype))\n reader = NrrdReader()\n result = reader.read(filename)\n self.assertEqual(result.array.dtype, dtype)\n self.assertTupleEqual(result.array.shape, expected_shape)\n self.assertTupleEqual(tuple(result.header[\"sizes\"]), expected_shape)\n np.testing.assert_allclose(result.array, test_image)\n\n @parameterized.expand([TEST_CASE_5, TEST_CASE_6])\n def test_read_float(self, data_shape, filename, expected_shape, dtype):\n test_image = np.random.rand(*data_shape).astype(dtype)\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, filename)\n nrrd.write(filename, test_image.astype(dtype))\n reader = NrrdReader()\n result = reader.read(filename)\n self.assertEqual(result.array.dtype, dtype)\n self.assertTupleEqual(result.array.shape, expected_shape)\n self.assertTupleEqual(tuple(result.header[\"sizes\"]), expected_shape)\n np.testing.assert_allclose(result.array, test_image)\n\n @parameterized.expand([TEST_CASE_7])\n def test_read_list(self, data_shape, filenames, expected_shape, dtype):\n test_image = 
np.random.rand(*data_shape).astype(dtype)\n with tempfile.TemporaryDirectory() as tempdir:\n for i, filename in enumerate(filenames):\n filenames[i] = os.path.join(tempdir, filename)\n nrrd.write(filenames[i], test_image.astype(dtype))\n reader = NrrdReader()\n results = reader.read(filenames)\n for result in results:\n self.assertTupleEqual(result.array.shape, expected_shape)\n self.assertTupleEqual(tuple(result.header[\"sizes\"]), expected_shape)\n np.testing.assert_allclose(result.array, test_image)\n\n @parameterized.expand([TEST_CASE_8])\n def test_read_with_header(self, data_shape, filename, expected_shape, dtype, reference_header):\n test_image = np.random.rand(*data_shape).astype(dtype)\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, filename)\n nrrd.write(filename, test_image.astype(dtype), header=reference_header)\n reader = NrrdReader()\n image_array, image_header = reader.get_data(reader.read(filename))\n self.assertIsInstance(image_array, np.ndarray)\n self.assertEqual(image_array.dtype, dtype)\n self.assertTupleEqual(image_array.shape, expected_shape)\n np.testing.assert_allclose(image_array, test_image)\n self.assertIsInstance(image_header, dict)\n self.assertTupleEqual(tuple(image_header[\"spatial_shape\"]), expected_shape)\n\n @parameterized.expand([TEST_CASE_8])\n def test_read_with_header_index_order_c(self, data_shape, filename, expected_shape, dtype, reference_header):\n test_image = np.random.rand(*data_shape).astype(dtype)\n with tempfile.TemporaryDirectory() as tempdir:\n filename = os.path.join(tempdir, filename)\n nrrd.write(filename, test_image.astype(dtype), header=reference_header)\n reader = NrrdReader(index_order=\"C\")\n image_array, image_header = reader.get_data(reader.read(filename))\n self.assertIsInstance(image_array, np.ndarray)\n self.assertEqual(image_array.dtype, dtype)\n self.assertTupleEqual(image_array.shape, expected_shape[::-1])\n self.assertTupleEqual(image_array.shape, tuple(image_header[\"spatial_shape\"]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom typing import Any, Sequence, Union\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks import eval_mode\nfrom monai.networks.nets import DynUNet\nfrom monai.utils.module import pytorch_after\nfrom tests.utils import skip_if_no_cuda, skip_if_windows, test_script_save\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nstrides: Sequence[Union[Sequence[int], int]]\nkernel_size: Sequence[Any]\nexpected_shape: Sequence[Any]\n\nTEST_CASE_DYNUNET_2D = []\nout_channels = 2\nin_size = 64\nspatial_dims = 2\nfor kernel_size in [(3, 3, 3, 1), ((3, 1), 1, (3, 3), (1, 1))]:\n for strides in [(1, 1, 1, 1), (2, 2, 2, 1)]:\n expected_shape = (1, out_channels, *[in_size // strides[0]] * spatial_dims)\n for in_channels in [2, 3]:\n for res_block in [True, False]:\n test_case = [\n {\n \"spatial_dims\": spatial_dims,\n \"in_channels\": in_channels,\n \"out_channels\": out_channels,\n \"kernel_size\": kernel_size,\n \"strides\": strides,\n \"upsample_kernel_size\": strides[1:],\n \"norm_name\": \"batch\",\n \"act_name\": (\"leakyrelu\", {\"inplace\": True, \"negative_slope\": 0.2}),\n \"deep_supervision\": False,\n \"res_block\": res_block,\n \"dropout\": None,\n },\n (1, in_channels, in_size, in_size),\n expected_shape,\n ]\n TEST_CASE_DYNUNET_2D.append(test_case)\n\nTEST_CASE_DYNUNET_3D = [] # in 3d cases, also test anisotropic kernel/strides\nin_channels = 1\nin_size = 64\nfor out_channels in [2, 3]:\n expected_shape = (1, out_channels, 64, 32, 64)\n for res_block in [True, False]:\n test_case = [\n {\n \"spatial_dims\": 3,\n \"in_channels\": in_channels,\n \"out_channels\": out_channels,\n \"kernel_size\": (3, (1, 1, 3), 3, 3),\n \"strides\": ((1, 2, 1), 2, 2, 1),\n \"upsample_kernel_size\": (2, 2, 1),\n \"filters\": (64, 96, 128, 192),\n \"norm_name\": (\"INSTANCE\", {\"affine\": True}),\n \"deep_supervision\": True,\n \"res_block\": res_block,\n \"dropout\": (\"alphadropout\", {\"p\": 0.25}),\n },\n (1, in_channels, in_size, in_size, in_size),\n expected_shape,\n ]\n TEST_CASE_DYNUNET_3D.append(test_case)\n\nTEST_CASE_DEEP_SUPERVISION = []\nfor spatial_dims in [2, 3]:\n for res_block in [True, False]:\n for deep_supr_num in [1, 2]:\n for strides in [(1, 2, 1, 2, 1), (2, 2, 2, 1), (2, 1, 1, 2, 2)]:\n scale = strides[0]\n test_case = [\n {\n \"spatial_dims\": spatial_dims,\n \"in_channels\": 1,\n \"out_channels\": 2,\n \"kernel_size\": [3] * len(strides),\n \"strides\": strides,\n \"upsample_kernel_size\": strides[1:],\n \"norm_name\": (\"group\", {\"num_groups\": 16}),\n \"deep_supervision\": True,\n \"deep_supr_num\": deep_supr_num,\n \"res_block\": res_block,\n },\n (1, 1, *[in_size] * spatial_dims),\n (1, 1 + deep_supr_num, 2, *[in_size // scale] * spatial_dims),\n ]\n TEST_CASE_DEEP_SUPERVISION.append(test_case)\n\n\nclass TestDynUNet(unittest.TestCase):\n @parameterized.expand(TEST_CASE_DYNUNET_2D + TEST_CASE_DYNUNET_3D)\n def test_shape(self, input_param, 
input_shape, expected_shape):\n net = DynUNet(**input_param).to(device)\n with eval_mode(net):\n result = net(torch.randn(input_shape).to(device))\n self.assertEqual(result.shape, expected_shape)\n\n def test_script(self):\n input_param, input_shape, _ = TEST_CASE_DYNUNET_2D[0]\n net = DynUNet(**input_param)\n test_data = torch.randn(input_shape)\n test_script_save(net, test_data)\n\n\n@skip_if_no_cuda\n@skip_if_windows\nclass TestDynUNetWithInstanceNorm3dNVFuser(unittest.TestCase):\n @parameterized.expand([TEST_CASE_DYNUNET_3D[0]])\n def test_consistency(self, input_param, input_shape, _):\n for eps in [1e-4, 1e-5]:\n for momentum in [0.1, 0.01]:\n for affine in [True, False]:\n norm_param = {\"eps\": eps, \"momentum\": momentum, \"affine\": affine}\n input_param[\"norm_name\"] = (\"instance\", norm_param)\n input_param_fuser = input_param.copy()\n input_param_fuser[\"norm_name\"] = (\"instance_nvfuser\", norm_param)\n for memory_format in [torch.contiguous_format, torch.channels_last_3d]:\n net = DynUNet(**input_param).to(\"cuda:0\", memory_format=memory_format)\n net_fuser = DynUNet(**input_param_fuser).to(\"cuda:0\", memory_format=memory_format)\n net_fuser.load_state_dict(net.state_dict())\n\n input_tensor = torch.randn(input_shape).to(\"cuda:0\", memory_format=memory_format)\n with eval_mode(net):\n result = net(input_tensor)\n with eval_mode(net_fuser):\n result_fuser = net_fuser(input_tensor)\n\n # torch.testing.assert_allclose() is deprecated since 1.12 and will be removed in 1.14\n if pytorch_after(1, 12):\n torch.testing.assert_close(result, result_fuser)\n else:\n torch.testing.assert_allclose(result, result_fuser)\n\n\nclass TestDynUNetDeepSupervision(unittest.TestCase):\n @parameterized.expand(TEST_CASE_DEEP_SUPERVISION)\n def test_shape(self, input_param, input_shape, expected_shape):\n net = DynUNet(**input_param).to(device)\n with torch.no_grad():\n results = net(torch.randn(input_shape).to(device))\n self.assertEqual(results.shape, expected_shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport nibabel as nib\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.apps.deepgrow.dataset import create_dataset\nfrom monai.utils import set_determinism\n\nTEST_CASE_1 = [{\"dimension\": 2, \"pixdim\": (1, 1)}, {\"length\": 3}, 9, 1]\n\nTEST_CASE_2 = [{\"dimension\": 2, \"pixdim\": (1, 1), \"limit\": 1}, {\"length\": 3}, 3, 1]\n\nTEST_CASE_3 = [{\"dimension\": 2, \"pixdim\": (1, 1)}, {\"length\": 1}, 3, 1]\n\nTEST_CASE_4 = [{\"dimension\": 3, \"pixdim\": (1, 1, 1)}, {\"length\": 1}, 1, 1]\n\nTEST_CASE_5 = [{\"dimension\": 3, \"pixdim\": (1, 1, 1)}, {\"length\": 1, \"image_channel\": 4}, 1, 1]\n\nTEST_CASE_6 = [{\"dimension\": 2, \"pixdim\": (1, 1)}, {\"length\": 1, \"image_channel\": 4}, 3, 1]\n\nTEST_CASE_7 = [\n {\"dimension\": 2, \"pixdim\": (1, 1), \"label_key\": None},\n {\"length\": 1, \"image_channel\": 4, \"with_label\": False},\n 40,\n None,\n]\n\nTEST_CASE_8 = [\n {\"dimension\": 3, \"pixdim\": (1, 1, 1), \"label_key\": None},\n {\"length\": 1, \"image_channel\": 4, \"with_label\": False},\n 1,\n None,\n]\n\n\nclass TestCreateDataset(unittest.TestCase):\n def setUp(self):\n set_determinism(1)\n self.tempdir = tempfile.mkdtemp()\n\n def _create_data(self, length=1, image_channel=1, with_label=True):\n affine = np.eye(4)\n datalist = []\n for i in range(length):\n if image_channel == 1:\n image = np.random.randint(0, 2, size=(128, 128, 40))\n else:\n image = np.random.randint(0, 2, size=(128, 128, 40, image_channel))\n image_file = os.path.join(self.tempdir, f\"image{i}.nii.gz\")\n nib.save(nib.Nifti1Image(image, affine), image_file)\n\n if with_label:\n # 3 slices has label\n label = np.zeros((128, 128, 40))\n label[0][1][0] = 1\n label[0][1][1] = 1\n label[0][0][2] = 1\n label[0][1][2] = 1\n label_file = os.path.join(self.tempdir, f\"label{i}.nii.gz\")\n nib.save(nib.Nifti1Image(label, affine), label_file)\n datalist.append({\"image\": image_file, \"label\": label_file})\n else:\n datalist.append({\"image\": image_file})\n\n return datalist\n\n @parameterized.expand(\n [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7, TEST_CASE_8]\n )\n def test_create_dataset(self, args, data_args, expected_length, expected_region):\n datalist = self._create_data(**data_args)\n deepgrow_datalist = create_dataset(datalist=datalist, output_dir=self.tempdir, **args)\n self.assertEqual(len(deepgrow_datalist), expected_length)\n if expected_region is not None:\n self.assertEqual(deepgrow_datalist[0][\"region\"], expected_region)\n\n def test_invalid_dim(self):\n with self.assertRaises(ValueError):\n create_dataset(datalist=self._create_data(), output_dir=self.tempdir, dimension=4, pixdim=(1, 1, 1, 1))\n\n def test_empty_datalist(self):\n with self.assertRaises(ValueError):\n create_dataset(datalist=[], output_dir=self.tempdir, dimension=3, pixdim=(1, 1, 1))\n\n def tearDown(self):\n 
shutil.rmtree(self.tempdir)\n set_determinism(None)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\n\nfrom monai.transforms import RandAxisFlipd\nfrom tests.utils import TEST_NDARRAYS, NumpyImageTestCase3D, assert_allclose\n\n\nclass TestRandAxisFlip(NumpyImageTestCase3D):\n def test_correct_results(self):\n for p in TEST_NDARRAYS:\n flip = RandAxisFlipd(keys=\"img\", prob=1.0)\n result = flip({\"img\": p(self.imt[0])})[\"img\"]\n\n expected = [np.flip(channel, flip.flipper._axis) for channel in self.imt[0]]\n assert_allclose(result, p(np.stack(expected)))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import GridSplitd\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nA11 = torch.randn(3, 2, 2)\nA12 = torch.randn(3, 2, 2)\nA21 = torch.randn(3, 2, 2)\nA22 = torch.randn(3, 2, 2)\n\nA1 = torch.cat([A11, A12], 2)\nA2 = torch.cat([A21, A22], 2)\nA = torch.cat([A1, A2], 1)\n\nTEST_CASE_0 = [{\"keys\": \"image\", \"grid\": (2, 2)}, {\"image\": A}, [A11, A12, A21, A22]]\nTEST_CASE_1 = [{\"keys\": \"image\", \"grid\": (2, 1)}, {\"image\": A}, [A1, A2]]\nTEST_CASE_2 = [{\"keys\": \"image\", \"grid\": (1, 2)}, {\"image\": A1}, [A11, A12]]\nTEST_CASE_3 = [{\"keys\": \"image\", \"grid\": (1, 2)}, {\"image\": A2}, [A21, A22]]\nTEST_CASE_4 = [{\"keys\": \"image\", \"grid\": (1, 1), \"size\": {\"image\": (2, 2)}}, {\"image\": A}, [A11]]\nTEST_CASE_5 = [{\"keys\": \"image\", \"grid\": (1, 1), \"size\": {\"image\": 4}}, {\"image\": A}, [A]]\nTEST_CASE_6 = [{\"keys\": \"image\", \"grid\": (2, 2), \"size\": {\"image\": 2}}, {\"image\": A}, [A11, A12, A21, A22]]\nTEST_CASE_7 = [{\"keys\": \"image\", \"grid\": (1, 1)}, {\"image\": A}, [A]]\nTEST_CASE_8 = [\n {\"keys\": \"image\", \"grid\": (2, 2), \"size\": {\"image\": 2}},\n {\"image\": torch.arange(12).reshape(1, 3, 4).to(torch.float32)},\n torch.Tensor([[[[0, 1], [4, 5]]], [[[2, 3], [6, 7]]], [[[4, 5], [8, 9]]], [[[6, 7], [10, 11]]]]).to(torch.float32),\n]\n\nTEST_SINGLE = []\nfor p in TEST_NDARRAYS:\n TEST_SINGLE.append([p, *TEST_CASE_0])\n TEST_SINGLE.append([p, *TEST_CASE_1])\n TEST_SINGLE.append([p, *TEST_CASE_2])\n TEST_SINGLE.append([p, *TEST_CASE_3])\n TEST_SINGLE.append([p, *TEST_CASE_4])\n TEST_SINGLE.append([p, *TEST_CASE_5])\n TEST_SINGLE.append([p, *TEST_CASE_6])\n TEST_SINGLE.append([p, *TEST_CASE_7])\n TEST_SINGLE.append([p, *TEST_CASE_8])\n\nTEST_CASE_MC_0 = [\n {\"keys\": \"image\", \"grid\": (2, 2)},\n [{\"image\": A}, {\"image\": A}],\n [[A11, A12, A21, A22], [A11, A12, A21, A22]],\n]\nTEST_CASE_MC_1 = [{\"keys\": \"image\", \"grid\": (2, 1)}, [{\"image\": A}, {\"image\": A}, {\"image\": A}], [[A1, A2]] * 3]\nTEST_CASE_MC_2 = [{\"keys\": \"image\", \"grid\": (1, 2)}, [{\"image\": A1}, {\"image\": A2}], [[A11, A12], [A21, A22]]]\n\nTEST_MULTIPLE = []\nfor p in TEST_NDARRAYS:\n TEST_MULTIPLE.append([p, *TEST_CASE_MC_0])\n TEST_MULTIPLE.append([p, *TEST_CASE_MC_1])\n TEST_MULTIPLE.append([p, *TEST_CASE_MC_2])\n\n\nclass TestGridSplitd(unittest.TestCase):\n @parameterized.expand(TEST_SINGLE)\n def test_split_patch_single_call(self, in_type, input_parameters, img_dict, expected):\n input_dict = {}\n for k, v in img_dict.items():\n input_dict[k] = in_type(v)\n splitter = GridSplitd(**input_parameters)\n output = splitter(input_dict)\n for output_patch, expected_patch in zip(output, expected):\n assert_allclose(output_patch[input_parameters[\"keys\"]], expected_patch, type_test=False)\n\n @parameterized.expand(TEST_MULTIPLE)\n def 
test_split_patch_multiple_call(self, in_type, input_parameters, img_list, expected_list):\n splitter = GridSplitd(**input_parameters)\n for img_dict, expected in zip(img_list, expected_list):\n input_dict = {}\n for k, v in img_dict.items():\n input_dict[k] = in_type(v)\n output = splitter(input_dict)\n for output_patch, expected_patch in zip(output, expected):\n assert_allclose(output_patch[input_parameters[\"keys\"]], expected_patch, type_test=False)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import Compose, RandSpatialCropSamplesd, ToTensord\nfrom monai.utils.enums import PostFix\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTEST_CASE_1 = [\n {\"keys\": [\"img\", \"seg\"], \"num_samples\": 4, \"roi_size\": [2, 2, 2], \"random_center\": True},\n {\"img\": np.arange(81).reshape(3, 3, 3, 3), \"seg\": np.arange(81, 0, -1).reshape(3, 3, 3, 3)},\n [(3, 3, 3, 2), (3, 2, 2, 2), (3, 3, 3, 2), (3, 3, 2, 2)],\n {\n \"img\": np.array(\n [\n [[[0, 1], [3, 4]], [[9, 10], [12, 13]], [[18, 19], [21, 22]]],\n [[[27, 28], [30, 31]], [[36, 37], [39, 40]], [[45, 46], [48, 49]]],\n [[[54, 55], [57, 58]], [[63, 64], [66, 67]], [[72, 73], [75, 76]]],\n ]\n ),\n \"seg\": np.array(\n [\n [[[81, 80], [78, 77]], [[72, 71], [69, 68]], [[63, 62], [60, 59]]],\n [[[54, 53], [51, 50]], [[45, 44], [42, 41]], [[36, 35], [33, 32]]],\n [[[27, 26], [24, 23]], [[18, 17], [15, 14]], [[9, 8], [6, 5]]],\n ]\n ),\n },\n]\n\nTEST_CASE_2 = []\nfor p in TEST_NDARRAYS:\n TEST_CASE_2.append(\n [\n {\"keys\": [\"img\", \"seg\"], \"num_samples\": 8, \"roi_size\": [2, 2, 3], \"random_center\": False},\n {\"img\": p(np.arange(81).reshape(3, 3, 3, 3)), \"seg\": p(np.arange(81, 0, -1).reshape(3, 3, 3, 3))},\n [\n (3, 3, 3, 3),\n (3, 2, 3, 3),\n (3, 2, 2, 3),\n (3, 2, 3, 3),\n (3, 3, 3, 3),\n (3, 3, 3, 3),\n (3, 2, 2, 3),\n (3, 3, 2, 3),\n ],\n {\n \"img\": p(\n np.array(\n [\n [[[0, 1, 2], [3, 4, 5]], [[9, 10, 11], [12, 13, 14]], [[18, 19, 20], [21, 22, 23]]],\n [[[27, 28, 29], [30, 31, 32]], [[36, 37, 38], [39, 40, 41]], [[45, 46, 47], [48, 49, 50]]],\n [[[54, 55, 56], [57, 58, 59]], [[63, 64, 65], [66, 67, 68]], [[72, 73, 74], [75, 76, 77]]],\n ]\n )\n ),\n \"seg\": p(\n np.array(\n [\n [[[81, 80, 79], [78, 77, 76]], [[72, 71, 70], [69, 68, 67]], [[63, 62, 61], [60, 59, 58]]],\n [[[54, 53, 52], [51, 50, 49]], [[45, 44, 43], [42, 41, 40]], [[36, 35, 34], [33, 32, 31]]],\n [[[27, 26, 25], [24, 23, 22]], [[18, 17, 16], [15, 14, 13]], [[9, 8, 7], [6, 5, 4]]],\n ]\n )\n ),\n },\n ]\n )\n\n\nclass TestRandSpatialCropSamplesd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, *TEST_CASE_2])\n def test_shape(self, input_param, input_data, expected_shape, expected_last):\n xform = RandSpatialCropSamplesd(**input_param)\n xform.set_random_state(1234)\n result = xform(input_data)\n for item, expected in zip(result, expected_shape):\n self.assertTupleEqual(item[\"img\"].shape, expected)\n self.assertTupleEqual(item[\"seg\"].shape, expected)\n for i, item in enumerate(result):\n self.assertEqual(item[PostFix.meta(\"img\")][\"patch_index\"], i)\n self.assertEqual(item[PostFix.meta(\"seg\")][\"patch_index\"], i)\n assert_allclose(item[\"img\"], expected_last[\"img\"], type_test=True)\n assert_allclose(item[\"seg\"], expected_last[\"seg\"], type_test=True)\n\n def test_deep_copy(self):\n data = {\"img\": np.ones((1, 10, 11, 12))}\n num_samples = 
3\n sampler = RandSpatialCropSamplesd(\n keys=[\"img\"], roi_size=(3, 3, 3), num_samples=num_samples, random_center=True, random_size=False\n )\n transform = Compose([ToTensord(keys=\"img\"), sampler])\n samples = transform(data)\n self.assertEqual(len(samples), num_samples)\n for sample in samples:\n self.assertEqual(len(sample[\"img_transforms\"]), len(transform))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks.nets import DiNTS, TopologyInstance, TopologySearch\nfrom monai.networks.nets.dints import Cell\nfrom tests.utils import SkipIfBeforePyTorchVersion, test_script_save\n\nTEST_CASES_3D = [\n [\n {\n \"channel_mul\": 0.2,\n \"num_blocks\": 6,\n \"num_depths\": 3,\n \"device\": \"cpu\",\n \"use_downsample\": False,\n \"spatial_dims\": 3,\n },\n {\n \"in_channels\": 1,\n \"num_classes\": 3,\n \"act_name\": \"RELU\",\n \"norm_name\": (\"INSTANCE\", {\"affine\": True}),\n \"use_downsample\": False,\n \"spatial_dims\": 3,\n },\n (3, 1, 32, 32, 16),\n (3, 3, 32, 32, 16),\n ]\n]\nif torch.cuda.is_available():\n TEST_CASES_3D += [\n [\n {\n \"channel_mul\": 0.5,\n \"num_blocks\": 7,\n \"num_depths\": 4,\n \"device\": \"cuda\",\n \"use_downsample\": True,\n \"spatial_dims\": 3,\n },\n {\n \"in_channels\": 2,\n \"num_classes\": 2,\n \"act_name\": \"PRELU\",\n \"norm_name\": \"BATCH\",\n \"use_downsample\": True,\n \"spatial_dims\": 3,\n },\n (3, 2, 32, 32, 16),\n (3, 2, 32, 32, 16),\n ]\n ]\nTEST_CASES_2D = [\n [\n {\n \"channel_mul\": 1,\n \"num_blocks\": 7,\n \"num_depths\": 4,\n \"device\": \"cpu\",\n \"use_downsample\": True,\n \"spatial_dims\": 2,\n },\n {\n \"in_channels\": 2,\n \"num_classes\": 2,\n \"act_name\": \"PRELU\",\n \"norm_name\": \"BATCH\",\n \"use_downsample\": True,\n \"spatial_dims\": 2,\n },\n (2, 2, 32, 16),\n (2, 2, 32, 16),\n ]\n]\nif torch.cuda.is_available():\n TEST_CASES_2D += [\n [\n {\n \"channel_mul\": 0.5,\n \"num_blocks\": 8,\n \"num_depths\": 4,\n \"device\": \"cuda\",\n \"use_downsample\": False,\n \"spatial_dims\": 2,\n },\n {\n \"in_channels\": 1,\n \"num_classes\": 4,\n \"act_name\": \"RELU\",\n \"norm_name\": (\"INSTANCE\", {\"affine\": True}),\n \"use_downsample\": False,\n \"spatial_dims\": 2,\n },\n (2, 1, 32, 16),\n (2, 4, 32, 16),\n ]\n ]\n\n\nclass TestDints(unittest.TestCase):\n @parameterized.expand(TEST_CASES_3D + TEST_CASES_2D)\n def test_dints_inference(self, dints_grid_params, dints_params, input_shape, expected_shape):\n grid = TopologySearch(**dints_grid_params)\n dints_params[\"dints_space\"] = grid\n net = DiNTS(**dints_params).to(dints_grid_params[\"device\"])\n result = net(torch.randn(input_shape).to(dints_grid_params[\"device\"]))\n self.assertEqual(result.shape, expected_shape)\n # test functions\n grid.get_ram_cost_usage(in_size=input_shape, full=True)\n grid.get_ram_cost_usage(in_size=input_shape, full=False)\n probs_a, _ = grid.get_prob_a(child=True)\n grid.get_topology_entropy(probs_a)\n grid.decode()\n grid.gen_mtx(depth=4)\n\n @parameterized.expand(TEST_CASES_3D + TEST_CASES_2D)\n def test_dints_search(self, dints_grid_params, dints_params, input_shape, expected_shape):\n num_blocks = dints_grid_params[\"num_blocks\"]\n num_depths = dints_grid_params[\"num_depths\"]\n # init a Cell to obtain cell operation number\n _cell = Cell(1, 1, 0, 
spatial_dims=dints_grid_params[\"spatial_dims\"])\n        num_cell_ops = len(_cell.OPS)\n        # define architecture codes\n        node_a = torch.ones((num_blocks + 1, num_depths))\n        arch_code_a = np.ones((num_blocks, 3 * num_depths - 2))\n        arch_code_c = np.random.randint(num_cell_ops, size=(num_blocks, 3 * num_depths - 2))\n        # initialize with codes\n        dints_grid_params[\"arch_code\"] = [arch_code_a, arch_code_c]\n        grid = TopologyInstance(**dints_grid_params)\n        # set as deploy stage\n        dints_params[\"dints_space\"] = grid\n        dints_params[\"node_a\"] = node_a\n        net = DiNTS(**dints_params).to(dints_grid_params[\"device\"])\n        result = net(torch.randn(input_shape).to(dints_grid_params[\"device\"]))\n        self.assertEqual(result.shape, expected_shape)\n        self.assertTrue(isinstance(net.weight_parameters(), list))\n\n\n@SkipIfBeforePyTorchVersion((1, 9))\nclass TestDintsTS(unittest.TestCase):\n    @parameterized.expand(TEST_CASES_3D + TEST_CASES_2D)\n    def test_script(self, dints_grid_params, dints_params, input_shape, _):\n        grid = TopologyInstance(**dints_grid_params)\n        dints_grid_params[\"device\"] = \"cpu\"\n        dints_params[\"dints_space\"] = grid\n        net = DiNTS(**dints_params).to(dints_grid_params[\"device\"])\n        test_script_save(net, torch.randn(input_shape).to(dints_grid_params[\"device\"]))\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
] | [
[
"numpy.heaviside",
"numpy.expand_dims",
"numpy.linspace",
"torch.randn",
"numpy.stack",
"numpy.fft.ifft",
"torch.cuda.is_available",
"numpy.fft.fftfreq",
"torch.device",
"numpy.hanning",
"torch.as_tensor"
],
[
"torch.cuda.is_available",
"torch.tensor"
],
[
"numpy.asarray",
"torch.as_tensor"
],
[
"numpy.arange",
"numpy.eye",
"numpy.array",
"numpy.random.randn"
],
[
"torch.tensor"
],
[
"torch.cuda.amp.autocast"
],
[
"torch.nn.functional.layer_norm",
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"numpy.power",
"torch.nn.ModuleList",
"torch.flatten",
"torch.nn.Linear",
"torch.nn.Identity",
"numpy.ceil",
"torch.no_grad",
"torch.utils.checkpoint.checkpoint",
"torch.arange",
"torch.roll",
"torch.meshgrid",
"torch.nn.functional.pad"
],
[
"torch.cuda.is_available",
"torch.tensor"
],
[
"torch.utils.data.DataLoader"
],
[
"torch.isinf",
"torch.isnan",
"torch.cat",
"torch.tensor",
"torch.exp",
"torch.log",
"torch.stack",
"torch.as_tensor"
],
[
"numpy.random.rand",
"numpy.iinfo",
"numpy.testing.assert_allclose",
"numpy.random.randint"
],
[
"torch.testing.assert_allclose",
"torch.randn",
"torch.no_grad",
"torch.cuda.is_available",
"torch.testing.assert_close"
],
[
"numpy.eye",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.flip",
"numpy.stack"
],
[
"torch.randn",
"torch.arange",
"torch.Tensor",
"torch.cat"
],
[
"numpy.arange",
"numpy.array",
"numpy.ones"
],
[
"torch.ones",
"torch.randn",
"numpy.ones",
"torch.cuda.is_available",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HoliestCow/ece692_deeplearning | [
"638c27e0d9c01ec9b0a8be8a85e54937645a897e"
] | [
"project1/multiprocess.py"
] | [
"import os\nimport sys\nimport multiprocessing\nimport copyreg\nimport types\nimport time\nfrom six import string_types\nfrom progressbar import ProgressBar, FormatLabel, Percentage, Bar, ETA\nimport numpy as np\nimport pandas as pd\nfrom subprocess import Popen\nfrom collections import OrderedDict\nfrom basic_class import BasicClass\n\n\nMAX_NCORES = multiprocessing.cpu_count()\nSAFE_NCORES = MAX_NCORES - 2\n\n\n# -----------------------------------------------------------------------------\n# This is a trick to allow multiprocessing to use target functions that are\n# object methods. This is used for the algorithms which are trained and then\n# evaluations are completed inside of MP threads\n# -----------------------------------------------------------------------------\ndef _pickle_method(m):\n if m.im_self is None:\n return getattr, (m.im_class, m.im_func.func_name)\n else:\n return getattr, (m.im_self, m.im_func.func_name)\n\n\ncopyreg.pickle(types.MethodType, _pickle_method)\n\n\ndef _mute_stdout():\n sys.stdout = open(os.devnull, 'w')\n\n\nclass _MultiProcessor(BasicClass):\n\n def __init__(self, ncores=SAFE_NCORES):\n self.ncores = ncores\n\n @property\n def ncores(self):\n return self._ncores\n\n @ncores.setter\n def ncores(self, x):\n if x is None:\n ncores = self._ncores\n elif isinstance(x, string_types):\n if x.lower() == 'max':\n ncores = MAX_NCORES\n elif x.lower() == 'safe':\n ncores = SAFE_NCORES\n elif x.isdigit():\n ncores = int(x)\n else:\n raise ValueError('Unrecognized `ncores`: {}'.format(x))\n else:\n ncores = int(x)\n if ncores <= 0:\n raise ValueError('`ncores` must be positive: {}'.format(ncores))\n if ncores > MAX_NCORES:\n raise ValueError(\n 'ncores={} exceeds MAX_NCORES={}'.format(ncores, MAX_NCORES))\n self._ncores = ncores\n self._print('Using {} cores'.format(self.ncores))\n\n\nclass PyMultiProcessor(_MultiProcessor):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n copyreg.pickle(types.MethodType, _pickle_method)\n self.reset_cmds()\n\n def reset_cmds(self):\n self.funcs = []\n self.args = []\n self.num_cmds = 0\n\n def add_func_and_args(self, func, args):\n \"\"\"\n Add one function and dict of args to be processed\n \"\"\"\n assert isinstance(args, dict), '`args` not dict: {}'.format(args)\n self.funcs.append(func)\n self.args.append(args)\n self.num_cmds += 1\n\n def run_processes(self, ncores=None, concat=False,\n mute_stdout=True):\n \"\"\"\n Run all functions and args added via `add_func_and_args`\n If `ncores=1` multiprocessing is not used.\n If `concat` the results will be attempted to be concatenated along\n axis=1 (column-wise)\n \"\"\"\n self.ncores = ncores\n # ProgressBar stuff\n widgets = [\n FormatLabel('Processed: %(value)d of {} '.format(self.num_cmds)),\n Percentage(),\n Bar(),\n ETA()]\n # Storage\n results = []\n # Single process\n if self.ncores == 1:\n pbar = ProgressBar(widgets=widgets, maxval=self.num_cmds).start()\n for i, (f, a) in enumerate(zip(self.funcs, self.args)):\n results.append(f(**a))\n pbar.update(i + 1)\n pbar.finish()\n # Multiprocess\n else:\n pbar = ProgressBar(widgets=widgets, maxval=self.num_cmds).start()\n if mute_stdout:\n self.pool = multiprocessing.Pool(processes=self.ncores,\n initializer=_mute_stdout)\n else:\n self.pool = multiprocessing.Pool(processes=self.ncores)\n procs = []\n # Start procs\n for i, (f, a) in enumerate(zip(self.funcs, self.args)):\n procs.append(self.pool.apply_async(f, (), a))\n # Wait for and collect results\n for i, p in enumerate(procs):\n results.append(p.get())\n 
pbar.update(i + 1)\n            pbar.finish()\n        self.pool.close()\n        if concat:\n            # Concat dataframes?\n            if all([isinstance(x, pd.DataFrame) for x in results]):\n                self.results = pd.concat(results, axis=0)\n                self.results.sort_index(inplace=True)\n            # Concat dicts of arrays?\n            elif all([isinstance(x, dict) for x in results]):\n                # gather each key across all result dicts and concatenate;\n                # iterating over the freshly-created (empty) `self.results`\n                # here would silently collect nothing\n                self.results = OrderedDict()\n                for k in results[0]:\n                    self.results[k] = np.concatenate(\n                        [r[k] for r in results], axis=0)\n            else:\n                self.results = results\n        else:\n            self.results = results\n        # Cleanup\n        self.reset_cmds()\n        return self.results\n\n\ndef run_process(cmd=[], log_fname=None, **kwargs):\n    \"\"\"\n    Run the command line `cmd` in a subprocess, optionally redirecting its\n    stdout to the file `log_fname`, and return timing info and the process ID\n    \"\"\"\n    # Starting time\n    start_time = time.time()\n    # Discard stdout to /dev/null\n    if (log_fname == 'null') or (log_fname is False):\n        proc = Popen(cmd, stdout=open(os.devnull, 'w'))\n    # Print stdout normally\n    elif log_fname is None:\n        proc = Popen(cmd)\n    # Save stdout to file\n    else:\n        proc = Popen(cmd, stdout=open(log_fname, 'w'))\n    # Wait for command to finish...\n    proc.wait()\n    end_time = time.time()\n    return {'pid': proc.pid,\n            'cmd': ' '.join(cmd),\n            'start_time': start_time,\n            'end_time': end_time,\n            'duration_sec': end_time - start_time}\n"
] | [
[
"numpy.concatenate",
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Minys233/GCN-BMP | [
"21b64a3c8cc9bc33718ae09c65aa917e575132eb"
] | [
"train_binary.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 12/8/2018 6:54 PM\r\n# @Author : chinshin\r\n# @FileName: train_ddi.py\r\n\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\nimport os\r\nimport sys\r\nimport random\r\nimport chainer\r\nimport logging\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('AGG')\r\nfrom chainer.backends import cuda\r\nfrom chainer import functions as F\r\nfrom chainer import links\r\nfrom chainer import optimizers\r\nfrom chainer import training\r\nfrom chainer.iterators import SerialIterator\r\nfrom chainer.training import extensions as E\r\nfrom chainer.training import triggers\r\nfrom chainer_chemistry.links import GraphLinear\r\nfrom argparse import ArgumentParser\r\nfrom os.path import dirname, abspath\r\nROOT_PATH = dirname(dirname(dirname(abspath(__file__))))\r\nsys.path.insert(0, ROOT_PATH)\r\nfrom parsers import CSVFileParserForPair\r\nfrom chainer_chemistry.dataset.preprocessors import preprocess_method_dict\r\nfrom chainer_chemistry.dataset.converters import concat_mols\r\nfrom chainer_chemistry.datasets import NumpyTupleDataset\r\nfrom chainer_chemistry.training.extensions import ROCAUCEvaluator, PRCAUCEvaluator, PrecisionEvaluator, RecallEvaluator, F1Evaluator, AccuracyEvaluator\r\nfrom chainer_chemistry.models import MLP, SchNet, WeaveNet, RSGCN, Regressor, Classifier, Cosine\r\n# 稳定版本\r\n# from models.chin_ggnn import GGNN\r\n# from models.ggnn_dev import GGNN\r\nfrom models.ggnn_att import GGNN\r\nfrom models.models import NFP\r\n# 引入共注意力机制\r\nfrom models.coattention.alternating_coattention import AlternatingCoattention\r\nfrom models.coattention.parallel_coattention import ParallelCoattention, CircularParallelCoattention\r\nfrom models.coattention.vqa_parallel_coattention import VQAParallelCoattention\r\nfrom models.coattention.PoolingFineCoattention import PoolingFineCoattention\r\nfrom models.coattention.lt_fine_coattention import LinearTransformFineCoattention\r\nfrom models.coattention.nie_coattention import NieFineCoattention, DeepNieFineCoattention, FourierFineCoattention, VeryDeepNieFineCoattention, \\\r\n ExtremeDeepNieFineCoattention\r\nfrom models.coattention.bimpm import BiMPM\r\n# from models.ggnn_dev_self_loop import GGNN\r\nfrom chainer_chemistry.models import NTN, SymMLP, HolE, DistMult\r\n\r\nlogging.basicConfig(format='%(asctime)s: %(filename)s: %(funcName)s: %(lineno)d: %(message)s', level=logging.INFO)\r\nglobal_seed = 2018\r\nrandom.seed(global_seed)\r\n\r\n\r\nclass GraphConvPredictorForPair(chainer.Chain):\r\n def __init__(self, graph_conv, attn=None, mlp=None, symmetric=None):\r\n \"\"\"Initializes the graph convolution predictor.\r\n\r\n Args:\r\n graph_conv: The graph convolution network required to obtain\r\n molecule feature representation.\r\n mlp: Multi layer perceptron; used as the final fully connected\r\n layer. 
Set it to `None` if no operation is necessary\r\n                after the `graph_conv` calculation.\r\n        \"\"\"\r\n\r\n        super(GraphConvPredictorForPair, self).__init__()\r\n        with self.init_scope():\r\n            self.graph_conv = graph_conv\r\n            if isinstance(mlp, chainer.Link):\r\n                self.mlp = mlp\r\n            if isinstance(attn, chainer.Link):\r\n                self.attn = attn\r\n        if not isinstance(mlp, chainer.Link):\r\n            self.mlp = mlp\r\n        if not isinstance(attn, chainer.Link):\r\n            self.attn = attn\r\n        self.symmetric = symmetric\r\n\r\n    def __call__(self, atoms_1, adjs_1, atoms_2, adjs_2):\r\n        if self.xp == cuda.cupy:\r\n            atoms_1 = cuda.to_gpu(atoms_1)\r\n            adjs_1 = cuda.to_gpu(adjs_1)\r\n            atoms_2 = cuda.to_gpu(atoms_2)\r\n            adjs_2 = cuda.to_gpu(adjs_2)\r\n\r\n        g1 = self.graph_conv(atoms_1, adjs_1)\r\n        atoms_1 = self.graph_conv.get_atom_array()\r\n        g2 = self.graph_conv(atoms_2, adjs_2)\r\n        atoms_2 = self.graph_conv.get_atom_array()\r\n\r\n        g1, g2 = self.attn(atoms_1, g1, atoms_2, g2)\r\n\r\n        if self.mlp.__class__.__name__ == 'MLP':\r\n            g = F.concat((g1, g2), axis=-1)\r\n            g = self.mlp(g)\r\n            return g\r\n        elif self.mlp.__class__.__name__ in ('NTN', 'SymMLP', 'HolE', 'DistMult', 'Cosine'):\r\n            g = self.mlp(g1, g2)\r\n            return g\r\n        else:\r\n            raise ValueError('[ERROR] No methods for similarity prediction')\r\n\r\n    def predict(self, atoms_1, adjs_1, atoms_2, adjs_2):\r\n        if self.symmetric is None:\r\n            with chainer.no_backprop_mode(), chainer.using_config('train', False):\r\n                x = self.__call__(atoms_1, adjs_1, atoms_2, adjs_2)\r\n                target = F.sigmoid(x)\r\n                if self.xp == cuda.cupy:\r\n                    target = cuda.to_gpu(target)\r\n                return target\r\n        elif self.symmetric == 'or' or self.symmetric == 'and':\r\n            with chainer.no_backprop_mode(), chainer.using_config('train', False):\r\n                x1 = self.__call__(atoms_1, adjs_1, atoms_2, adjs_2)\r\n                target1 = F.sigmoid(x1)\r\n                x2 = self.__call__(atoms_2, adjs_2, atoms_1, adjs_1)\r\n                target2 = F.sigmoid(x2)\r\n                if self.xp == cuda.cupy:\r\n                    target1 = cuda.to_gpu(target1)\r\n                    target2 = cuda.to_gpu(target2)\r\n                if self.symmetric == 'or':\r\n                    target = self.xp.max([target1, target2])\r\n                elif self.symmetric == 'and':\r\n                    target = self.xp.min([target1, target2])\r\n                return target\r\n\r\n\r\ndef set_up_predictor(method,\r\n                     fp_hidden_dim, fp_out_dim, conv_layers, concat_hidden,\r\n                     fp_dropout_rate, fp_batch_normalization,\r\n                     net_hidden_dims, class_num,\r\n                     weight_typing=True, sim_method='mlp', symmetric=None,\r\n                     attn_model=None\r\n                     ):\r\n\r\n    sim_method_dict = {\r\n        'mlp': 'multi-layered perceptron',\r\n        'ntn': 'bilinear transform',\r\n        'symmlp': 'symmetric perceptron',\r\n        'hole': 'holographic embedding',\r\n        'dist-mult': 'dist-mult',\r\n    }\r\n\r\n    method_dict = {\r\n        'ggnn': 'GGNN',\r\n        'nfp': 'NFP',\r\n    }\r\n\r\n    logging.info('Graph Embedding: {}'.format(method_dict.get(method, None)))\r\n    logging.info('Link Prediction: {}'.format(sim_method_dict.get(sim_method, None)))\r\n\r\n    lp = None\r\n    if sim_method == 'mlp':\r\n        lp = MLP(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n    elif sim_method == 'ntn':\r\n        ntn_out_dim = 8\r\n        lp = NTN(left_dim=fp_out_dim, right_dim=fp_out_dim, out_dim=class_num,\r\n                 ntn_out_dim=ntn_out_dim, hidden_dims=net_hidden_dims)\r\n\r\n    elif sim_method == 'symmlp':\r\n        lp = 
MLP(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n    elif sim_method == 'hole':\r\n        lp = HolE(out_dim=class_num, hidden_dims=net_hidden_dims)\r\n\r\n    elif sim_method == 'dist-mult':\r\n        dm_out_dim = 8\r\n        lp = DistMult(left_dim=fp_out_dim, right_dim=fp_out_dim, out_dim=class_num,\r\n                      dm_out_dim=dm_out_dim, hidden_dims=net_hidden_dims)\r\n    else:\r\n        raise ValueError('[ERROR] Invalid link prediction model: {}'.format(sim_method))\r\n\r\n    attn = None\r\n    scorer = 'bilinear'\r\n    if attn_model == 'alter':\r\n        attn_weight_tying = True\r\n        logging.info('Using alternating co-attention')\r\n        if attn_weight_tying:\r\n            logging.info('Weight is tying')\r\n        attn = AlternatingCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=8, weight_tying=True)\r\n    elif attn_model == 'para':\r\n        attn_weight_tying = True\r\n        logging.info('Using parallel co-attention')\r\n        logging.info('Scorer is {}'.format(scorer))\r\n        if attn_weight_tying:\r\n            logging.info('Weight is tying')\r\n        attn = ParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=1,\r\n                                   activation=F.tanh, weight_tying=attn_weight_tying)\r\n    elif attn_model == 'circ':\r\n        logging.info('Using circular based parallel co-attention')\r\n        attn = CircularParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                           activation=F.tanh)\r\n\r\n    elif attn_model == 'vqa':\r\n        logging.info('Using vqa fine-grained co-attention')\r\n        attn = VQAParallelCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=8)\r\n\r\n    elif attn_model == 'pool':\r\n        logging.info('Using pool fine-grained co-attention')\r\n        attn = PoolingFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim)\r\n\r\n    elif attn_model == 'lt':\r\n        logging.info('Using lt fine-grained co-attention')\r\n        attn = LinearTransformFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim)\r\n\r\n    elif attn_model == 'nie':\r\n        logging.info('Using nie fine-grained co-attention')\r\n        logging.info('Using activation function tanh')\r\n        attn = NieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                  head=8, activation=F.tanh)\r\n\r\n    elif attn_model == 'deep':\r\n        logging.info('Using deep fine-grained co-attention')\r\n        logging.info('Using activation function tanh')\r\n        attn = DeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                      head=8, activation=F.tanh)\r\n\r\n    elif attn_model == 'very-deep':\r\n        logging.info('Using very deep fine-grained co-attention')\r\n        logging.info('Using activation function tanh')\r\n        attn = VeryDeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                          head=8, activation=F.tanh)\r\n\r\n    elif attn_model == 'extreme-deep':\r\n        logging.info('Using extreme deep fine-grained co-attention')\r\n        logging.info('Using activation function tanh')\r\n        attn = ExtremeDeepNieFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                             head=8, activation=F.tanh)\r\n\r\n    elif attn_model == 'fourier':\r\n        logging.info('Using fourier fine-grained co-attention')\r\n        logging.info('Using activation function tanh')\r\n        attn = FourierFineCoattention(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim,\r\n                                      head=8, activation=F.tanh)\r\n\r\n    elif attn_model == 'bimpm':\r\n        logging.info('Using bimpm matching strategy')\r\n        attn = BiMPM(hidden_dim=fp_hidden_dim, out_dim=fp_out_dim, head=fp_out_dim,\r\n                     with_max_pool=True, with_att_mean=True, with_att_max=True, aggr=F.sum)\r\n\r\n    else:\r\n        raise ValueError('[ERROR] Invalid Co-Attention Method.')\r\n\r\n    encoder = None\r\n    if method == 'ggnn':\r\n        if not weight_typing:\r\n            
logging.info('Weight is not tying')\r\n if fp_dropout_rate != 0.0:\r\n logging.info('Forward propagation dropout rate is {:.1f}'.format(fp_dropout_rate))\r\n if fp_batch_normalization:\r\n logging.info('Using batch normalization')\r\n if concat_hidden:\r\n logging.info('Using concatenation between layers')\r\n\r\n encoder = GGNN(out_dim=fp_out_dim, hidden_dim=fp_hidden_dim, n_layers=conv_layers,\r\n concat_hidden=concat_hidden, weight_tying=weight_typing)\r\n elif method == 'nfp':\r\n print('Training an NFP predictor...')\r\n encoder = NFP(out_dim=fp_out_dim, hidden_dim=fp_hidden_dim, n_layers=conv_layers, concat_hidden=concat_hidden)\r\n\r\n else:\r\n raise ValueError('[ERROR] Invalid graph embedding encoder.')\r\n\r\n predictor = GraphConvPredictorForPair(encoder, attn, lp, symmetric=symmetric)\r\n return predictor\r\n\r\n\r\ndef augment_dataset(dataset):\r\n dataset_tuple = dataset.get_datasets()\r\n atoms1, adjs1, atoms2, adjs2, labels = dataset_tuple\r\n new_atoms1 = np.concatenate((atoms1, atoms2), axis=0)\r\n new_atoms2 = np.concatenate((atoms2, atoms1), axis=0)\r\n new_adjs1 = np.concatenate((adjs1, adjs2), axis=0)\r\n new_adjs2 = np.concatenate((adjs2, adjs1), axis=0)\r\n new_labels = np.concatenate((labels, labels), axis=0)\r\n new_dataset = NumpyTupleDataset(new_atoms1, new_adjs1, new_atoms2, new_adjs2, new_labels)\r\n return new_dataset\r\n\r\n\r\ndef parse_arguments():\r\n # Lists of supported preprocessing methods/models.\r\n method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn', 'ecfp']\r\n sim_method_list = ['mlp', 'cosine', 'ntn', 'symmlp', 'hole', 'dist-mult']\r\n layer_aggregator_list = ['gru-attn', 'gru', 'lstm-attn', 'lstm', 'attn', 'self-attn', 'concat', 'max-pool']\r\n attn_list = ['para', 'alter', 'circ', 'vqa', 'pool', 'lt', 'nie', 'bimpm', 'deep', 'fourier', 'very-deep', 'extreme-deep']\r\n\r\n # Set up the argument parser.\r\n parser = ArgumentParser(description='Classification on ddi dataset')\r\n parser.add_argument('--datafile', '-d', type=str,\r\n default='ddi_train.csv',\r\n help='csv file containing the dataset')\r\n parser.add_argument('--train-datafile', type=str,\r\n default='ddi_train.csv',\r\n help='csv file containing the train dataset')\r\n parser.add_argument('--train-pos-neg-ratio', type=float,\r\n default=-1.,\r\n help='ratio between positive and negative instances')\r\n parser.add_argument('--valid-datafile', type=str,\r\n default='ddi_test.csv',\r\n help='csv file containing the test dataset')\r\n parser.add_argument('--method', '-m', type=str, choices=method_list,\r\n help='method name', default='nfp')\r\n parser.add_argument('--sim-method', type=str, choices=sim_method_list,\r\n help='similarity method', default='mlp')\r\n parser.add_argument('--label', '-l', nargs='+',\r\n default=['label', ],\r\n help='target label for classification')\r\n parser.add_argument('--class-names', type=str,\r\n default=['interaction', 'no interactions'],\r\n help='class names in classification task')\r\n parser.add_argument('--conv-layers', '-c', type=int, default=4,\r\n help='number of convolution layers')\r\n parser.add_argument('--batchsize', '-b', type=int, default=32,\r\n help='batch size')\r\n parser.add_argument('--gpu', '-g', type=int, default=-1,\r\n help='id of gpu to use; negative value means running'\r\n 'the code on cpu')\r\n parser.add_argument('--out', '-o', type=str, default='result',\r\n help='path to save the computed models to')\r\n parser.add_argument('--epoch', '-e', type=int, default=10,\r\n help='number of epochs')\r\n 
parser.add_argument('--learning-rate', type=float, default=0.001,\r\n help='learning rate of optimizer')\r\n parser.add_argument('--weight-decay-rate', type=float, default=0.,\r\n help='weight decay rate of optimizer')\r\n parser.add_argument('--exp-shift-rate', type=float, default=1.,\r\n help='exponential shift rate')\r\n parser.add_argument('--exp-shift-strategy', type=int, default=1,\r\n help='strategy to adapt the learning rate manually')\r\n parser.add_argument('--lin-shift-rate', type=float, default=0.,\r\n help='linear shift rate')\r\n parser.add_argument('--unit-num', '-u', type=int, default=16,\r\n help='number of units in one layer of the models')\r\n parser.add_argument('--fp-out-dim', type=int, default=16,\r\n help='dimensionality of output of dynamic fingerprint')\r\n parser.add_argument('--fp-hidden-dim', type=int, default=16,\r\n help='dimensionality of hidden units in dynamic fingerprint')\r\n parser.add_argument('--fp-attention', type=bool, default=False,\r\n help='whether to use attention mechanism in dynamic fingerprint')\r\n parser.add_argument('--update-attention', type=bool, default=False,\r\n help='whether to use attention mechasnim in update')\r\n parser.add_argument('--concat-hidden', type=bool, default=False,\r\n help='whether to concatenate the hidden states in all graphconv layers')\r\n parser.add_argument('--fp-max-degree', type=int, default=6,\r\n help='max degrees of neural fingerprint')\r\n parser.add_argument('--weight-tying', type=str, default=True,\r\n help='whether to use the same parameters in all layers(Default: True)')\r\n parser.add_argument('--attention-tying', type=str, default=True,\r\n help='whether to use the same parameter in all attention(Default: True)')\r\n parser.add_argument('--fp-dropout-rate', type=float, default=0.0,\r\n help='dropout rate in graph convolutional neural network')\r\n parser.add_argument('--fp-bn', type=str, default='False',\r\n help='whether to use batch normalization in dynamic fingerprint')\r\n\r\n parser.add_argument('--attn', type=str, default=None, choices=attn_list,\r\n help='indicate the type of co-attention')\r\n\r\n parser.add_argument('--net-hidden-dims', type=str, default='32,16',\r\n help='dimensionality of hidden units in neural network for similarity prediction')\r\n parser.add_argument('--net-layer-num', type=int, default=2,\r\n help='number of layers in neural network for similarity prediction')\r\n parser.add_argument('--layer-aggregator', type=str, default='', choices=layer_aggregator_list,\r\n help='layer aggregator in dynamic fingerprint (Default: )')\r\n parser.add_argument('--seed', '-s', type=int, default=777,\r\n help='random seed value')\r\n parser.add_argument('--train-data-ratio', '-r', type=float, default=0.8,\r\n help='ratio of training data w.r.t the dataset')\r\n parser.add_argument('--protocol', type=int, default=2,\r\n help='pickle protocol version')\r\n parser.add_argument('--model-filename', type=str, default='classifier.pkl',\r\n help='saved models filename')\r\n parser.add_argument('--resume', type=str, default='',\r\n help='path to a trainer snapshot')\r\n parser.add_argument('--context', type=str, default='False',\r\n help='whether to use context embedding in dynamic fingerprint')\r\n parser.add_argument('--context-layers', type=int, default=1,\r\n help='number of context layers')\r\n parser.add_argument('--context-dropout', type=float, default=0.,\r\n help='dropout rate of context layers')\r\n parser.add_argument('--message-function', type=str, default='matrix_multiply',\r\n 
help='message function in dynamic fingerprint (default: matrix_multiply)')\r\n\r\n parser.add_argument('--readout-function', type=str, default='graph_level',\r\n help='readout function in dynamic fingerprint (default: graph_level)')\r\n parser.add_argument('--num-timesteps', type=int, default=3,\r\n help='number of timesteps in set2vec readout function')\r\n parser.add_argument('--num-output-hidden-layers', type=int, default=0,\r\n help='number of hidden layers in set2vec readout function')\r\n parser.add_argument('--output-hidden-dim', type=int, default=16,\r\n help='number of hidden units in each hidden layer in set2vec readout function')\r\n parser.add_argument('--output-activation', type=str, choices=['relu'],\r\n default='relu', help='activation function used in set2vec readout function')\r\n\r\n parser.add_argument('--multi-gpu', type=str, default='False',\r\n help='whether to use multiple GPUs')\r\n\r\n parser.add_argument('--augment', type=str, default='False',\r\n help='whether to use data augmentation')\r\n\r\n parser.add_argument('--max-norm', type=float, default=0.,\r\n help='the maximum value of gradient in back propagation')\r\n parser.add_argument('--l2-rate', type=float, default=0.,\r\n help='coefficient for the L2 regularization')\r\n parser.add_argument('--l1-rate', type=float, default=0.,\r\n help='coefficient for the L1 regularization')\r\n\r\n parser.add_argument('--loss-func', type=str, default='cross-entropy',\r\n help='loss function for training the models')\r\n\r\n parser.add_argument('--symmetric', type=str, default=None,\r\n help='how to apply symmetry in prediction')\r\n return parser.parse_args()\r\n\r\n\r\ndef modify_dataset_for_hinge(dataset):\r\n atoms1, adjs1, atoms2, adjs2, labels = dataset.get_datasets()\r\n labels_squeezed = np.squeeze(labels, axis=1)\r\n new_dataset = NumpyTupleDataset(atoms1, adjs1, atoms2, adjs2, labels_squeezed)\r\n return new_dataset\r\n\r\n\r\ndef main():\r\n # Parse the arguments.\r\n args = parse_arguments()\r\n augment = False if args.augment == 'False' else True\r\n multi_gpu = False if args.multi_gpu == 'False' else True\r\n if args.label:\r\n labels = args.label\r\n class_num = len(labels) if isinstance(labels, list) else 1\r\n else:\r\n raise ValueError('No target label was specified.')\r\n\r\n # Dataset preparation. 
Postprocessing is required for the classification task.\r\n def postprocess_label(label_list):\r\n label_arr = np.asarray(label_list, dtype=np.int32)\r\n return label_arr\r\n\r\n # Apply a preprocessor to the dataset.\r\n logging.info('Preprocess train dataset and test dataset...')\r\n preprocessor = preprocess_method_dict[args.method]()\r\n parser = CSVFileParserForPair(preprocessor, postprocess_label=postprocess_label,\r\n labels=labels, smiles_cols=['smiles_1', 'smiles_2'])\r\n train = parser.parse(args.train_datafile)['dataset']\r\n test = parser.parse(args.valid_datafile)['dataset']\r\n\r\n if augment:\r\n logging.info('Utilizing data augmentation in train set')\r\n train = augment_dataset(train)\r\n\r\n num_train = train.get_datasets()[0].shape[0]\r\n num_test = test.get_datasets()[0].shape[0]\r\n logging.info('Train/test split: {}/{}'.format(num_train, num_test))\r\n\r\n if len(args.net_hidden_dims):\r\n net_hidden_dims = tuple([int(net_hidden_dim) for net_hidden_dim in args.net_hidden_dims.split(',')])\r\n else:\r\n net_hidden_dims = ()\r\n\r\n weight_tying = False if args.weight_tying == 'False' else True\r\n fp_batch_normalization = True if args.fp_bn == 'True' else False\r\n\r\n predictor = set_up_predictor(method=args.method,\r\n fp_hidden_dim=args.fp_hidden_dim, fp_out_dim=args.fp_out_dim,\r\n conv_layers=args.conv_layers, concat_hidden=args.concat_hidden,\r\n fp_dropout_rate=args.fp_dropout_rate, fp_batch_normalization=fp_batch_normalization,\r\n net_hidden_dims=net_hidden_dims, class_num=class_num,\r\n sim_method=args.sim_method, weight_typing=weight_tying,\r\n symmetric=args.symmetric, attn_model=args.attn,\r\n )\r\n\r\n if args.train_pos_neg_ratio != -1.:\r\n # Set up the iterator.\r\n train_dataset = train.get_datasets()\r\n atoms1_train, adjs1_train, atoms2_train, adjs2_train, labels_train = train_dataset\r\n labels_train = np.squeeze(labels_train)\r\n train_dataset_arr = np.concatenate([item[:, None] if len(item.shape) == 1 else item for item in list(train_dataset)], axis=1)\r\n pos_train_dataset_arr = train_dataset_arr[labels_train == 1]\r\n num_pos_train = pos_train_dataset_arr.shape[0]\r\n pos_train_indices = np.arange(0, num_pos_train)\r\n neg_train_dataset_arr = train_dataset_arr[labels_train == 0]\r\n num_neg_train = neg_train_dataset_arr.shape[0]\r\n pos_neg_train_ratio = args.train_pos_neg_ratio\r\n num_pos_train = int(pos_neg_train_ratio * num_neg_train)\r\n np.random.seed(777)\r\n np.random.shuffle(pos_train_indices)\r\n pos_train_indices = pos_train_indices[:num_pos_train]\r\n pos_train_dataset_arr = pos_train_dataset_arr[pos_train_indices]\r\n new_train_dataset_arr = np.concatenate((pos_train_dataset_arr, neg_train_dataset_arr), axis=0)\r\n atoms1_train, adjs1_train = new_train_dataset_arr[:, 0], new_train_dataset_arr[:, 1]\r\n atoms2_train, adjs2_train = new_train_dataset_arr[:, 2], new_train_dataset_arr[:, 3]\r\n labels_train = new_train_dataset_arr[:, 4].astype(np.int32)\r\n labels_train = np.expand_dims(labels_train, axis=1)\r\n train = NumpyTupleDataset(atoms1_train, adjs1_train, atoms2_train, adjs2_train, labels_train)\r\n num_train = train.get_datasets()[0].shape[0]\r\n num_test = test.get_datasets()[0].shape[0]\r\n logging.info('Train pos-neg ratio is {:.4f}'.format(args.train_pos_neg_ratio))\r\n logging.info('Train/test number is {}/{}'.format(num_train, num_test))\r\n\r\n # if args.loss_func == 'hinge':\r\n # modify_dataset_for_hinge(train)\r\n # Set up the iterator.\r\n train_iter = SerialIterator(train, args.batchsize)\r\n test_iter = 
SerialIterator(test, args.batchsize,\r\n repeat=False, shuffle=False)\r\n\r\n metrics_fun = {'accuracy': F.binary_accuracy}\r\n loss_func = F.sigmoid_cross_entropy\r\n if args.loss_func == 'hinge':\r\n logging.info('Loss function is {}'.format(args.loss_func))\r\n loss_func = F.hinge\r\n metrics_fun = {'accuracy': F.accuracy}\r\n classifier = Classifier(predictor, lossfun=loss_func,\r\n metrics_fun=metrics_fun, device=args.gpu)\r\n\r\n # Set up the optimizer.\r\n optimizer = optimizers.Adam(alpha=args.learning_rate, weight_decay_rate=args.weight_decay_rate)\r\n # optimizer = optimizers.Adam()\r\n # optimizer = optimizers.SGD(lr=args.learning_rate)\r\n optimizer.setup(classifier)\r\n # add regularization\r\n if args.max_norm > 0:\r\n optimizer.add_hook(chainer.optimizer.GradientClipping(threshold=args.max_norm))\r\n if args.l2_rate > 0:\r\n optimizer.add_hook(chainer.optimizer.WeightDecay(rate=args.l2_rate))\r\n if args.l1_rate > 0:\r\n optimizer.add_hook(chainer.optimizer.Lasso(rate=args.l1_rate))\r\n\r\n # Set up the updater.\r\n if multi_gpu:\r\n logging.info('Using multiple GPUs')\r\n updater = training.ParallelUpdater(train_iter, optimizer, devices={'main': 0, 'second': 1},\r\n converter=concat_mols)\r\n else:\r\n logging.info('Using single GPU')\r\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu,\r\n converter=concat_mols)\r\n\r\n # Set up the trainer.\r\n logging.info('Training...')\r\n # add stop_trigger parameter\r\n early_stop = triggers.EarlyStoppingTrigger(monitor='validation/main/loss', patients=50, max_trigger=(500, 'epoch'))\r\n out = 'output' + '/' + args.out\r\n trainer = training.Trainer(updater, stop_trigger=early_stop, out=out)\r\n\r\n # trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)\r\n\r\n trainer.extend(E.Evaluator(test_iter, classifier,\r\n device=args.gpu, converter=concat_mols))\r\n\r\n train_eval_iter = SerialIterator(train, args.batchsize,\r\n repeat=False, shuffle=False)\r\n\r\n trainer.extend(AccuracyEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_acc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(AccuracyEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_acc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(ROCAUCEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_roc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(ROCAUCEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_roc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(PRCAUCEvaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_prc',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(PRCAUCEvaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_prc',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n # 
trainer.extend(PrecisionEvaluator(\r\n # train_eval_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='train_p',\r\n # pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # # extension name='validation' is already used by `Evaluator`,\r\n # # instead extension name `val` is used.\r\n # trainer.extend(PrecisionEvaluator(\r\n # val_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='val_p',\r\n # pos_labels=1, ignore_labels=-1))\r\n #\r\n # trainer.extend(RecallEvaluator(\r\n # train_eval_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='train_r',\r\n # pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # # extension name='validation' is already used by `Evaluator`,\r\n # # instead extension name `val` is used.\r\n # trainer.extend(RecallEvaluator(\r\n # val_iter, classifier, eval_func=predictor,\r\n # device=args.gpu, converter=concat_mols, name='val_r',\r\n # pos_labels=1, ignore_labels=-1))\r\n\r\n trainer.extend(F1Evaluator(\r\n train_eval_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='train_f',\r\n pos_labels=1, ignore_labels=-1, raise_value_error=False))\r\n # extension name='validation' is already used by `Evaluator`,\r\n # instead extension name `val` is used.\r\n trainer.extend(F1Evaluator(\r\n test_iter, classifier, eval_func=predictor,\r\n device=args.gpu, converter=concat_mols, name='val_f',\r\n pos_labels=1, ignore_labels=-1))\r\n\r\n # apply shift strategy to learning rate every 10 epochs\r\n # trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate), trigger=(10, 'epoch'))\r\n if args.exp_shift_strategy == 1:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([10, 20, 30, 40, 50, 60], 'epoch'))\r\n elif args.exp_shift_strategy == 2:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([5, 10, 15, 20, 25, 30], 'epoch'))\r\n elif args.exp_shift_strategy == 3:\r\n trainer.extend(E.ExponentialShift('alpha', args.exp_shift_rate),\r\n trigger=triggers.ManualScheduleTrigger([5, 10, 15, 20, 25, 30, 40, 50, 60, 70], 'epoch'))\r\n else:\r\n raise ValueError('No such strategy to adapt learning rate')\r\n # # observation of learning rate\r\n trainer.extend(E.observe_lr(), trigger=(1, 'iteration'))\r\n\r\n entries = [\r\n 'epoch',\r\n 'main/loss', 'train_acc/main/accuracy', 'train_roc/main/roc_auc', 'train_prc/main/prc_auc',\r\n # 'train_p/main/precision', 'train_r/main/recall',\r\n 'train_f/main/f1',\r\n 'validation/main/loss', 'val_acc/main/accuracy', 'val_roc/main/roc_auc', 'val_prc/main/prc_auc',\r\n # 'val_p/main/precision', 'val_r/main/recall',\r\n 'val_f/main/f1',\r\n 'lr',\r\n 'elapsed_time']\r\n trainer.extend(E.PrintReport(entries=entries))\r\n # change from 10 to 2 on Mar. 
1 2019\r\n trainer.extend(E.snapshot(), trigger=(2, 'epoch'))\r\n trainer.extend(E.LogReport())\r\n trainer.extend(E.ProgressBar())\r\n trainer.extend(E.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'))\r\n trainer.extend(E.PlotReport(['train_acc/main/accuracy', 'val_acc/main/accuracy'], 'epoch', file_name='accuracy.png'))\r\n\r\n if args.resume:\r\n resume_path = os.path.join(out, args.resume)\r\n logging.info('Resume training according to snapshot in {}'.format(resume_path))\r\n chainer.serializers.load_npz(resume_path, trainer)\r\n\r\n trainer.run()\r\n\r\n # Save the classifier's parameters.\r\n model_path = os.path.join(out, args.model_filename)\r\n logging.info('Saving the trained models to {}...'.format(model_path))\r\n classifier.save_pickle(model_path, protocol=args.protocol)\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.info(ROOT_PATH)\r\n\r\n main()\r\n"
] | [
[
"numpy.expand_dims",
"numpy.random.seed",
"numpy.asarray",
"matplotlib.use",
"numpy.squeeze",
"numpy.arange",
"numpy.random.shuffle",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
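The `predict` method in the pair predictor above supports a `symmetric` mode that scores a drug pair in both argument orders and combines the two sigmoid outputs. A minimal numpy sketch of that combination logic, with `score_pair` as a hypothetical stand-in for the model call (the original combines with `xp.max`/`xp.min` over the pair; elementwise `maximum`/`minimum` is used here):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def symmetric_predict(score_pair, a1, adj1, a2, adj2, mode=None):
    # mode=None reproduces the single directed pass
    if mode is None:
        return sigmoid(score_pair(a1, adj1, a2, adj2))
    # 'or'/'and': score both orders, keep the larger/smaller probability
    t1 = sigmoid(score_pair(a1, adj1, a2, adj2))
    t2 = sigmoid(score_pair(a2, adj2, a1, adj1))
    return np.maximum(t1, t2) if mode == 'or' else np.minimum(t1, t2)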
mhannani/ZinVert | [
"d54e1ab1980ed70945c34d2ceb294d559126f623"
] | [
"src/utils/create_seq2seq.py"
] | [
"import torch.nn as nn\nfrom torch.optim import Adam\nfrom src.models.Seq2seq import Seq2Seq\nfrom src.models.Decoder import Decoder, OneStepDecoder, OneStepDecoderWithAttention, DecoderWithAttention\nfrom src.models.Encoder import Encoder, EncoderAttention\nfrom src.models.Attention import Attention\nfrom src.data.config import *\n\n\ndef create_seq2seq(src_vocab, tgt_vocab):\n \"\"\"\n Creates encoder, decoder, defines optimizer, and loss function.\n :param src_vocab: torchtext.vocab.vocab.Vocab\n source language vocabulary\n :param tgt_vocab: torchtext.vocab.vocab.Vocab\n target language vocabulary\n :return: model, optimizer, criterion\n see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263\n \"\"\"\n\n # vocabularies size\n src_vocab__len = len(src_vocab)\n tgt_vocab__len = len(tgt_vocab)\n\n # encoder model\n encoder = Encoder(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)\n\n # one step decoder model\n one_step_decoder = OneStepDecoder(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM)\n\n # decoder model\n decoder = Decoder(one_step_decoder, device=DEVICE)\n\n # encoder -> decoder\n seq2seq = Seq2Seq(encoder, decoder)\n\n # move the model to device\n seq2seq.to(DEVICE)\n\n # Adam optimizer\n optimizer = Adam(seq2seq.parameters())\n\n # ignore padding indices\n # TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]\n TGT_PAD_IDX = 1\n\n # loss function\n criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)\n\n return seq2seq, optimizer, criterion\n\n\ndef create_seq2seq_with_att(src_vocab, tgt_vocab):\n \"\"\"\n Creates encoder, decoder, defines optimizer, and loss function with the attention mechanism\n :param src_vocab: torchtext.vocab.vocab.Vocab\n source language vocabulary\n :param tgt_vocab: torchtext.vocab.vocab.Vocab\n target language vocabulary\n :return: model, optimizer, criterion\n see : https://datascience.stackexchange.com/questions/10250/what-is-the-difference-between-objective-error-criterion-cost-loss-fun/10263\n \"\"\"\n\n # vocabularies size\n src_vocab__len = len(src_vocab.vocab)\n tgt_vocab__len = len(tgt_vocab.vocab)\n\n # encoder model\n encoder = EncoderAttention(src_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, N_LAYERS, DROPOUT)\n\n # attention model\n attention = Attention(HIDDEN_DIM, HIDDEN_DIM)\n\n # one step decoder model\n one_step_decoder = OneStepDecoderWithAttention(tgt_vocab__len, EMBEDDING_SIZE, HIDDEN_DIM, HIDDEN_DIM, attention)\n\n # decoder model\n decoder = DecoderWithAttention(one_step_decoder, device='cpu')\n\n # encoder -> decoder\n seq2seq = Seq2Seq(encoder, decoder)\n\n # move the model to device\n seq2seq.to('cpu')\n\n # Adam optimizer\n optimizer = Adam(seq2seq.parameters())\n\n # ignore padding indices\n # TGT_PAD_IDX = tgt_vocab.lookup_indices([SPECIAL_SYMBOLS[PAD_IDX]])[0]\n TGT_PAD_IDX = 1\n\n # loss function\n criterion = nn.CrossEntropyLoss(ignore_index=TGT_PAD_IDX)\n\n return seq2seq, optimizer, criterion\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.nn.CrossEntropyLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
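The `create_seq2seq` helpers above return a `(model, optimizer, criterion)` triple but no training loop. A hedged sketch of one training step consuming that triple; the `(seq_len, batch)` tensor layout and the model's forward signature are assumptions, not taken from the repository:

import torch

def train_step(model, optimizer, criterion, src, tgt):
    # src: (src_len, batch), tgt: (tgt_len, batch) -- assumed layout
    optimizer.zero_grad()
    output = model(src, tgt)  # assumed to return (tgt_len, batch, vocab)
    vocab_size = output.shape[-1]
    # skip the first target position (<sos>) and flatten for CrossEntropyLoss
    loss = criterion(output[1:].reshape(-1, vocab_size), tgt[1:].reshape(-1))
    loss.backward()
    optimizer.step()
    return loss.item()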
bhlarson/EmbeddedClassification | [
"68ede2d08c9e110c37ebb5c31a5e4c5d1abc52f2",
"68ede2d08c9e110c37ebb5c31a5e4c5d1abc52f2"
] | [
"infer_imdb_tfl.py",
"infer_imdb.py"
] | [
"\n\n\"\"\"Train a Resnet model for age classification and gender regression from the imdb dataset.\"\"\"\n#from __future__ import absolute_import\n#from __future__ import division\n#from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport shutil\nimport glob\nimport cv2\nimport numpy as np\nimport datetime\n\nUSE_TFL = False\nif USE_TFL:\n import tflite_runtime.interpreter as tflite\nelse:\n import tensorflow as tf\n\nprint('Python Version {}'.format(sys.version))\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--debug', action='store_true', help='Wait for debugger attach')\nparser.add_argument('--model', type=str, default='./tflite/1589806577_int8.tflite', help='Model path')\n\nparser.add_argument('--data_dir', type=str, \n #default='/home/mendel/data/imdb',\n default='/store/Datasets/imdb/imdb_crop/18',\n help='Path to the directory containing the imdb data tf record.')\n\nparser.add_argument('--match', type=str, default='*',\n help='File wildcard')\n\n\n_HEIGHT = 200\n_WIDTH = 200\n_DEPTH = 3\n\n\ndef get_filenames(data_dir, ext):\n \"\"\"Return a list of filenames.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: path to the the directory containing the input data.\n\n Returns:\n A list of file names.\n \"\"\"\n return glob.glob(os.path.join(data_dir, ext))\n\ndef build_engine(FLAGS):\n uff_model = uff.from_tensorflow_frozen_model(FLAGS.model, debug_mode=True, return_graph_info=True)\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:\n builder.max_workspace_size = 1 << 30\n builder.fp16_mode = True\n builder.max_batch_size = 1\n parser.register_input(\"Input\", (3, _WIDTH, _HEIGHT))\n parser.register_output(\"MarkOutput_0\")\n parser.parse(uff_model_path, network)\n \n print(\"Building TensorRT engine, this may take a few minutes...\")\n trt_engine = builder.build_cuda_engine(network)\n \n\ndef main(FLAGS):\n\n if USE_TFL:\n interpreter = tflite.Interpreter(model_path=FLAGS.model)\n else:\n interpreter = tf.lite.Interpreter(model_path=FLAGS.model)\n \n interpreter.allocate_tensors()\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details() \n\n #loaded = tf.saved_model.load(FLAGS.model)\n #print(list(loaded.signatures.keys()))\n #infer = loaded.signatures[\"serving_default\"]\n #print(infer.structured_outputs)\n #print (infer.inputs[0])\n imgs = get_filenames(FLAGS.data_dir, FLAGS.match)\n\n img = cv2.imread(imgs[0])\n imgShape = img.shape\n center = np.array([imgShape[1]/2, imgShape[0]/2])\n d = np.array([_HEIGHT/2,_WIDTH/2])\n p1 = tuple((center-d).astype(int))\n p1 = (max(p1[0],0),max(p1[1],0))\n p2 = tuple((center+d).astype(int))\n p2 = (min(p2[0],imgShape[0]-1),min(p2[1],imgShape[1]-1))\n crop = cv2.resize(img[p1[1]:p2[1], p1[0]:p2[0]],(_WIDTH,_HEIGHT))\n interpreter.set_tensor(input_details[0]['index'], crop.astype(np.float32))\n interpreter.invoke()\n\n # The function `get_tensor()` returns a copy of the tensor data.\n # Use `tensor()` in order to get a pointer to the tensor.\n age = interpreter.get_tensor(output_details[0]['index'])[0][0]\n\n gender = 'male'\n if(interpreter.get_tensor(output_details[1]['index'])[0] < 1):\n gender = 'female'\n\n print('{}:{}, {}:{}'.format(output_details[0]['name'], age, output_details[1]['name'],gender))\n\n start_time = datetime.datetime.now()\n for i, imfile in enumerate(imgs):\n img = cv2.imread(imfile)\n imgShape = 
img.shape\n center = np.array([imgShape[1]/2, imgShape[0]/2])\n d = np.array([_HEIGHT/2,_WIDTH/2])\n p1 = tuple((center-d).astype(int))\n p1 = (max(p1[0],0),max(p1[1],0))\n p2 = tuple((center+d).astype(int))\n p2 = (min(p2[0],imgShape[0]-1),min(p2[1],imgShape[1]-1))\n crop = cv2.resize(img[p1[1]:p2[1], p1[0]:p2[0]],(_WIDTH,_HEIGHT))\n interpreter.set_tensor(input_details[0]['index'], crop.astype(np.float32))\n interpreter.invoke()\n\n\n age = interpreter.get_tensor(output_details[0]['index'])[0][0]\n\n gender = 'male'\n if(interpreter.get_tensor(output_details[1]['index'])[0] < 1):\n gender = 'female'\n\n print('{}:{}, {}:{} file {}'.format(output_details[0]['name'], age, output_details[1]['name'],gender, imfile))\n\n analysis_done = datetime.datetime.now()\n total_time = (analysis_done-start_time).total_seconds()\n\n print('average image time {}'.format(total_time/len(imgs)))\n\nif __name__ == '__main__':\n FLAGS, unparsed = parser.parse_known_args()\n\n if FLAGS.debug:\n # https://code.visualstudio.com/docs/python/debugging#_remote-debugging\n # Launch application on remote computer: \n # > python3 -m ptvsd --host 0.0.0.0 --port 3000 --wait predict_imdb.py\n import ptvsd\n # Allow other computers to attach to ptvsd at this IP address and port.\n ptvsd.enable_attach(address=('0.0.0.0', 3000), redirect_output=True)\n # Pause the program until a remote debugger is attached\n print(\"Wait for debugger attach\")\n ptvsd.wait_for_attach()\n print(\"Debugger Attached\")\n\n main(FLAGS)\n print('complete')\n",
"\"\"\"Train a Resnet model for age classification and gender regression from the imdb dataset.\"\"\"\n#from __future__ import absolute_import\n#from __future__ import division\n#from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport shutil\nimport glob\nimport cv2\nimport datetime\n#import tensorrt as trt\n#import uff\n#import pycuda.driver as cuda\n#import pycuda.autoinit\nimport tensorflow as tf\n\nprint('Python Version {}'.format(sys.version))\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--debug', type=bool, default=False, help='Wait for debugge attach')\n\nparser.add_argument('--model', type=str, default='./saved_model/1588440354',\n help='Base directory for the model.')\n\nparser.add_argument('--data_dir', type=str, \n default='/store/Datasets/imdb/imdb_crop/00/',\n #default='C:\\\\data\\\\datasets\\\\imdb',\n help='Path to the directory containing the imdb data tf record.')\n\nparser.add_argument('--match', type=str, default='*',\n help='File wildcard')\n\n# Pre-trained models: https://github.com/tensorflow/models/blob/master/research/slim/README.md\nparser.add_argument('--pre_trained_model', type=str, \n default='/store/training/resnet_v2_101_2017_04_14/resnet_v2_101.ckpt',\n #default='C:\\\\data\\\\training\\\\resnet_v2_101_2017_04_14\\\\resnet_v2_101.ckpt',\n help='Path to the pre-trained model checkpoint.')\n\n\n_HEIGHT = 200\n_WIDTH = 200\n_DEPTH = 3\n\n\ndef get_filenames(data_dir, ext):\n \"\"\"Return a list of filenames.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: path to the the directory containing the input data.\n\n Returns:\n A list of file names.\n \"\"\"\n return glob.glob(os.path.join(data_dir, ext))\n\ndef build_engine(FLAGS):\n uff_model = uff.from_tensorflow_frozen_model(FLAGS.model, debug_mode=True, return_graph_info=True)\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:\n builder.max_workspace_size = 1 << 30\n builder.fp16_mode = True\n builder.max_batch_size = 1\n parser.register_input(\"Input\", (3, _WIDTH, _HEIGHT))\n parser.register_output(\"MarkOutput_0\")\n parser.parse(uff_model_path, network)\n \n print(\"Building TensorRT engine, this may take a few minutes...\")\n trt_engine = builder.build_cuda_engine(network)\n \n\ndef main(FLAGS):\n #TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)\n\n #engine = build_engine(FLAGS)\n\n loaded = tf.saved_model.load(FLAGS.model)\n print(list(loaded.signatures.keys()))\n infer = loaded.signatures[\"serving_default\"]\n print(infer.structured_outputs)\n print (infer.inputs[0])\n imgs = get_filenames(FLAGS.data_dir, FLAGS.match)\n\n img = cv2.imread(imgs[0])\n tfimg = tf.image.resize_with_crop_or_pad(tf.constant(img), _HEIGHT, _WIDTH)\n outputs = infer(tfimg)\n\n start_time = datetime.datetime.now()\n for i, imfile in enumerate(imgs):\n img = cv2.imread(imfile)\n tfimg = tf.image.resize_with_crop_or_pad(tf.constant(img), _HEIGHT, _WIDTH)\n outputs = infer(tfimg)\n\n print('{}: pred_age {}, pred_gender {}, '.format(i, outputs['pred_age'].numpy()[0,0],outputs['pred_gender'].numpy()[0]))\n analysis_done = datetime.datetime.now()\n total_time = (analysis_done-start_time).total_seconds()\n\n print('average image time {}'.format(total_time/len(imgs)))\n\nif __name__ == '__main__':\n FLAGS, unparsed = parser.parse_known_args()\n\n if FLAGS.debug:\n # https://code.visualstudio.com/docs/python/debugging#_remote-debugging\n # Launch applicaiton on remote computer: \n # > python3 -m 
ptvsd --host 0.0.0.0 --port 3000 --wait predict_imdb.py\n import ptvsd\n # Allow other computers to attach to ptvsd at this IP address and port.\n ptvsd.enable_attach(address=('0.0.0.0', 3000), redirect_output=True)\n # Pause the program until a remote debugger is attached\n print(\"Wait for debugger attach\")\n ptvsd.wait_for_attach()\n print(\"Debugger Attached\")\n\n main(FLAGS)\n print('complete')\n"
] | [
[
"tensorflow.lite.Interpreter",
"numpy.array"
],
[
"tensorflow.constant",
"tensorflow.saved_model.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
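Both inference scripts above center-crop each image to 200x200 before invoking the model. A compact restatement of the TFLite variant's crop-and-invoke pattern; the file paths are hypothetical, and the crop is fed without a batch dimension exactly as in the script above:

import cv2
import numpy as np
import tensorflow as tf

H = W = 200

def center_crop(img, h=H, w=W):
    # crop an h x w window around the image center, then resize to (w, h)
    cy, cx = img.shape[0] // 2, img.shape[1] // 2
    y0, x0 = max(cy - h // 2, 0), max(cx - w // 2, 0)
    return cv2.resize(img[y0:y0 + h, x0:x0 + w], (w, h))

interpreter = tf.lite.Interpreter(model_path='model.tflite')  # hypothetical path
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
crop = center_crop(cv2.imread('face.jpg'))  # hypothetical image
interpreter.set_tensor(inp['index'], crop.astype(np.float32))
interpreter.invoke()
age = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])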
b-fontana/law | [
"8fca50fc1aa54647e4abd6dec4ff5d8ac2622865"
] | [
"law/contrib/keras/formatter.py"
] | [
"# coding: utf-8\n\n\"\"\"\nKeras target formatters.\n\"\"\"\n\n\n__all__ = [\"KerasModelFormatter\", \"TFKerasModelFormatter\"]\n\n\nfrom law.target.formatter import Formatter\nfrom law.target.file import get_path\n\n\nclass ModelFormatter(Formatter):\n\n @classmethod\n def accepts(cls, path):\n return get_path(path).endswith(\".h5\")\n\n @classmethod\n def dump(cls, path, model, *args, **kwargs):\n model.save(path, *args, **kwargs)\n\n\nclass KerasModelFormatter(ModelFormatter):\n\n name = \"keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from keras.models import load_model\n return load_model(path, *args, **kwargs)\n\n\nclass TFKerasModelFormatter(ModelFormatter):\n\n name = \"tf_keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from tensorflow import keras\n return keras.models.load_model(path, *args, **kwargs)\n"
] | [
[
"tensorflow.keras.models.load_model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
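The formatter pair above is a thin wrapper around Keras' native HDF5 serialization: `dump` calls `model.save(path)` and `load` dispatches to the matching `load_model`. The underlying round-trip, for reference (the toy model is a hypothetical example):

from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(4, input_shape=(8,))])
model.compile(optimizer='adam', loss='mse')
model.save('model.h5')                          # what ModelFormatter.dump does
restored = keras.models.load_model('model.h5')  # what TFKerasModelFormatter.load does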
ir5/chainer-compiler | [
"c6d9b9ba3175931321c1e512c17642a613c03bfc",
"c6d9b9ba3175931321c1e512c17642a613c03bfc",
"c6d9b9ba3175931321c1e512c17642a613c03bfc"
] | [
"testcases/elichika_tests/model/EspNet_BLSTM.py",
"utils/run_onnx_ngraph.py",
"scripts/large_models/chainer_chain.py"
] | [
"#!/usr/bin/env python\n#\n# BLSTM from EspNet's e2e_asr.py.\n#\n\nimport argparse\nimport datetime\nimport logging\n\nimport numpy as np\n\nimport chainer\nfrom chainer.backends import cuda\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\n\nfrom testcases.elichika_tests.utils import sequence_utils\n\n\nclass BLSTM(chainer.Chain):\n def __init__(self, idim, elayers, cdim, hdim, dropout):\n super(BLSTM, self).__init__()\n with self.init_scope():\n self.nblstm = L.NStepBiLSTM(elayers, idim, cdim, dropout)\n self.l_last = L.Linear(cdim * 2, hdim)\n\n def forward(self, xs, ilens):\n '''BLSTM forward (the modified version)\n\n :param xs:\n :param ilens:\n :return:\n '''\n logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))\n # need to move ilens to cpu\n ilens = cuda.to_cpu(ilens)\n hy, cy, ys = self.nblstm(None, None, xs)\n ys = self.l_last(F.vstack(ys)) # (sum _utt frame_utt) x dim\n xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)\n del hy, cy\n\n # final tanh operation\n xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)\n\n # EDIT(hamaji): Unnecessary, as `force_tuple` is True by default.\n # # 1 utterance case, it becomes an array, so need to make a utt tuple\n # if not isinstance(xs, tuple):\n # xs = [xs]\n\n return xs, ilens # x: utt list of frame x dim\n\n def original(self, xs, ilens):\n '''BLSTM forward (the original implementation)\n\n :param xs:\n :param ilens:\n :return:\n '''\n logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))\n # need to move ilens to cpu\n ilens = cuda.to_cpu(ilens)\n hy, cy, ys = self.nblstm(None, None, xs)\n ys = self.l_last(F.vstack(ys)) # (sum _utt frame_utt) x dim\n xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)\n del hy, cy\n\n # final tanh operation\n xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)\n\n # 1 utterance case, it becomes an array, so need to make a utt tuple\n if not isinstance(xs, tuple):\n xs = [xs]\n\n return xs, ilens # x: utt list of frame x dim\n\n\nclass BLSTMBackprop(chainer.Chain):\n def __init__(self, idim, elayers, cdim, hdim, dropout):\n super(BLSTMBackprop, self).__init__()\n with self.init_scope():\n self.blstm = BLSTM(idim, elayers, cdim, hdim, dropout)\n\n def forward(self, xs, ilens):\n xs, ilens = self.blstm(xs, ilens)\n return F.pad_sequence(xs)\n\n\nfrom chainer_compiler.elichika import testtools\n\n\ndef main():\n import numpy as np\n np.random.seed(314)\n\n idim = 5\n elayers = 2\n cdim = 3\n hdim = 7\n batch_size = 3\n sequence_length = 4\n num_vocabs = 10\n\n model = BLSTM(idim, elayers, cdim, hdim, 0)\n labels, ilens = sequence_utils.gen_random_sequence(\n batch_size, sequence_length, num_vocabs)\n xs = []\n for l in ilens:\n xs.append(np.random.rand(l, idim).astype(dtype=np.float32))\n\n # Check if our modification is valid.\n expected = model.original(xs, ilens)\n actual = model.forward(xs, ilens)\n for e, a in zip(expected[0], actual[0]):\n assert np.allclose(e.array, a.array)\n assert np.allclose(expected[1], actual[1])\n\n testtools.generate_testcase(model, [xs, ilens])\n\n testtools.generate_testcase(BLSTMBackprop(idim, elayers, cdim, hdim, 0),\n [xs, ilens], backprop=True)\n\nif __name__ == '__main__':\n main()\n",
"import argparse\nimport glob\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport onnx\nimport onnx.numpy_helper\nimport ngraph as ng\nfrom ngraph_onnx.onnx_importer.importer import import_onnx_model\n\nimport run_onnx_util\n\n\ndef load_test_data(data_dir, input_names, output_names):\n inout_values = []\n for kind, names in [('input', input_names), ('output', output_names)]:\n names = list(names)\n values = []\n for pb in sorted(glob.glob(os.path.join(data_dir, '%s_*.pb' % kind))):\n with open(pb, 'rb') as f:\n tensor = onnx.TensorProto()\n tensor.ParseFromString(f.read())\n if tensor.name in names:\n name = tensor.name\n names.remove(name)\n else:\n name = names.pop(0)\n values.append((name, onnx.numpy_helper.to_array(tensor)))\n inout_values.append(values)\n return tuple(inout_values)\n\n\ndef compile(symbol, target, input_names, inputs, params, opt_level):\n shape_dict = {}\n dtype_dict = {}\n for name, value in zip(input_names, inputs.values()):\n shape_dict[name] = value.shape\n dtype_dict[name] = value.dtype\n for name, value in params.items():\n shape_dict[name] = value.shape\n dtype_dict[name] = value.dtype\n with nnvm.compiler.build_config(opt_level=opt_level):\n graph, lib, params = nnvm.compiler.build(symbol, target,\n shape=shape_dict,\n dtype=dtype_dict,\n params=params)\n return graph, lib, params\n\n\ndef onnx_input_output_names(onnx_filename):\n onnx_model = onnx.load(onnx_filename)\n initializer_names = set()\n for initializer in onnx_model.graph.initializer:\n initializer_names.add(initializer.name)\n\n input_names = []\n for input in onnx_model.graph.input:\n if input.name not in initializer_names:\n input_names.append(input.name)\n\n output_names = []\n for output in onnx_model.graph.output:\n output_names.append(output.name)\n\n return input_names, output_names\n\n\ndef run(args):\n onnx_filename = os.path.join(args.test_dir, args.model_file)\n input_names, output_names = onnx_input_output_names(onnx_filename)\n test_data_dir = os.path.join(args.test_dir, 'test_data_set_0')\n inputs, outputs = load_test_data(test_data_dir, input_names, output_names)\n\n model = onnx.load(onnx_filename)\n ng_func = import_onnx_model(model)\n\n runtime = ng.runtime(backend_name=args.backend)\n computation = runtime.computation(ng_func)\n\n inputs = [v for n, v in inputs]\n outputs = [v for n, v in outputs]\n\n actual_outputs = computation(*inputs)\n\n for i, (name, expected, actual) in enumerate(\n zip(output_names, outputs, actual_outputs)):\n np.testing.assert_allclose(expected, actual,\n rtol=1e-3, atol=1e-4), name\n print('%s: OK' % name)\n print('ALL OK')\n\n def compute():\n computation(*inputs)\n\n return run_onnx_util.run_benchmark(compute, args.iterations)\n\n\ndef get_args(args=None):\n parser = argparse.ArgumentParser(description='Run ONNX by nGraph')\n parser.add_argument('test_dir')\n parser.add_argument('--backend', '-b', default='CPU')\n parser.add_argument('--debug', '-g', action='store_true')\n parser.add_argument('--iterations', '-I', type=int, default=1)\n parser.add_argument('--model_file', default='model.onnx')\n return parser.parse_args(args=args)\n\n\ndef main():\n args = get_args()\n\n if args.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"import collections\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\n\nfrom chainer.functions.activation.relu import relu\nfrom chainer.functions.activation.softmax import softmax\nfrom chainer.links.connection.convolution_2d import Convolution2D\nfrom chainer.links.connection.linear import Linear\nfrom chainer.functions.noise.dropout import dropout\nfrom chainer.utils import argument\n\n\nclass Wrapper(chainer.Chain):\n\n def __init__(self, predictor, key=None):\n super().__init__()\n\n with self.init_scope():\n self.predictor = predictor\n self.key = key\n\n def __call__(self, x, t):\n if self.key is None:\n y = self.predictor(x)\n else:\n y = self.predictor(x, layers=[self.key])[self.key]\n y = F.softmax_cross_entropy(y, t)\n return y\n\n\n# Code is copied from chainer.links.model.\nclass VGGLayers(chainer.Chain):\n\n def __init__(self, pretrained_model='auto', n_layers=16):\n super(VGGLayers, self).__init__()\n kwargs = {}\n\n if n_layers not in [16, 19]:\n raise ValueError(\n 'The n_layers argument should be either 16 or 19,'\n 'but {} was given.'.format(n_layers)\n )\n\n with self.init_scope():\n self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)\n self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)\n self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)\n self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)\n self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)\n self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)\n self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)\n self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)\n self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)\n self.fc7 = Linear(4096, 4096, **kwargs)\n self.fc8 = Linear(4096, 1000, **kwargs)\n if n_layers == 19:\n self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)\n self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)\n\n @property\n def functions(self):\n # This class will not be used directly.\n raise NotImplementedError\n\n @property\n def available_layers(self):\n return list(self.functions.keys())\n\n def forward(self, x, layers=None, **kwargs):\n if layers is None:\n layers = ['prob']\n\n if kwargs:\n argument.check_unexpected_kwargs(\n kwargs, test='test argument is not supported anymore. 
'\n 'Use chainer.using_config'\n )\n argument.assert_kwargs_empty(kwargs)\n\n h = x\n activations = {}\n target_layers = set(layers)\n for key, funcs in self.functions.items():\n if len(target_layers) == 0:\n break\n for func in funcs:\n h = func(h)\n if key in target_layers:\n activations[key] = h\n target_layers.remove(key)\n return activations\n\n\ndef _max_pooling_2d(x):\n return F.max_pooling_2d(x, ksize=2)\n\n\nclass VGG16Layers(VGGLayers):\n\n def __init__(self, pretrained_model='auto'):\n super(VGG16Layers, self).__init__(pretrained_model, 16)\n\n @property\n def functions(self):\n return collections.OrderedDict([\n ('conv1_1', [self.conv1_1, relu]),\n ('conv1_2', [self.conv1_2, relu]),\n ('pool1', [_max_pooling_2d]),\n ('conv2_1', [self.conv2_1, relu]),\n ('conv2_2', [self.conv2_2, relu]),\n ('pool2', [_max_pooling_2d]),\n ('conv3_1', [self.conv3_1, relu]),\n ('conv3_2', [self.conv3_2, relu]),\n ('conv3_3', [self.conv3_3, relu]),\n ('pool3', [_max_pooling_2d]),\n ('conv4_1', [self.conv4_1, relu]),\n ('conv4_2', [self.conv4_2, relu]),\n ('conv4_3', [self.conv4_3, relu]),\n ('pool4', [_max_pooling_2d]),\n ('conv5_1', [self.conv5_1, relu]),\n ('conv5_2', [self.conv5_2, relu]),\n ('conv5_3', [self.conv5_3, relu]),\n ('pool5', [_max_pooling_2d]),\n # ('fc6', [self.fc6, relu, dropout]),\n ('fc6', [self.fc6, relu, lambda x: dropout(x, ratio=0.0)]),\n # ('fc7', [self.fc7, relu, dropout]),\n ('fc7', [self.fc7, relu, lambda x: dropout(x, ratio=0.0)]),\n ('fc8', [self.fc8]),\n ('prob', [softmax]),\n ])\n\n\nclass VGG19Layers(VGGLayers):\n\n def __init__(self, pretrained_model='auto'):\n super(VGG19Layers, self).__init__(pretrained_model, 19)\n\n @property\n def functions(self):\n return collections.OrderedDict([\n ('conv1_1', [self.conv1_1, relu]),\n ('conv1_2', [self.conv1_2, relu]),\n ('pool1', [_max_pooling_2d]),\n ('conv2_1', [self.conv2_1, relu]),\n ('conv2_2', [self.conv2_2, relu]),\n ('pool2', [_max_pooling_2d]),\n ('conv3_1', [self.conv3_1, relu]),\n ('conv3_2', [self.conv3_2, relu]),\n ('conv3_3', [self.conv3_3, relu]),\n ('conv3_4', [self.conv3_4, relu]),\n ('pool3', [_max_pooling_2d]),\n ('conv4_1', [self.conv4_1, relu]),\n ('conv4_2', [self.conv4_2, relu]),\n ('conv4_3', [self.conv4_3, relu]),\n ('conv4_4', [self.conv4_4, relu]),\n ('pool4', [_max_pooling_2d]),\n ('conv5_1', [self.conv5_1, relu]),\n ('conv5_2', [self.conv5_2, relu]),\n ('conv5_3', [self.conv5_3, relu]),\n ('conv5_4', [self.conv5_4, relu]),\n ('pool5', [_max_pooling_2d]),\n # ('fc6', [self.fc6, relu, dropout]),\n ('fc6', [self.fc6, relu, lambda x: dropout(x, ratio=0.0)]),\n # ('fc7', [self.fc7, relu, dropout]),\n ('fc7', [self.fc7, relu, lambda x: dropout(x, ratio=0.0)]),\n ('fc8', [self.fc8]),\n ('prob', [softmax]),\n ])\n\n\ndef get_chainer_model(chainer_chain, dtype, key):\n batchsize = 4\n x = np.random.uniform(size=(batchsize, 3, 224, 224)).astype(dtype)\n t = np.random.randint(size=(batchsize,), low=0, high=1000).astype(np.int32)\n model = Wrapper(chainer_chain(pretrained_model=None), key)\n return model, [x, t]\n\n\ndef get_resnet50(dtype=None):\n return get_chainer_model(L.ResNet50Layers, dtype, 'fc6')\n\n\ndef get_resnet152(dtype=None):\n return get_chainer_model(L.ResNet152Layers, dtype, 'fc6')\n\n\ndef get_vgg16(dtype=None):\n return get_chainer_model(VGG16Layers, dtype, 'fc8')\n\n\ndef get_vgg19(dtype=None):\n return get_chainer_model(VGG19Layers, dtype, 'fc8')\n"
] | [
[
"numpy.random.rand",
"numpy.cumsum",
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.testing.assert_allclose"
],
[
"numpy.random.uniform",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
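The BLSTM above concatenates all utterances into one `(sum(ilens), dim)` matrix, applies the linear layer once, and then re-splits per utterance with `F.split_axis(..., np.cumsum(ilens[:-1]), axis=0)`. The same splitting logic in plain numpy, with made-up lengths:

import numpy as np

ilens = np.array([4, 2, 3])
packed = np.arange(9 * 5).reshape(9, 5)  # (sum(ilens), dim)
chunks = np.split(packed, np.cumsum(ilens[:-1]), axis=0)
assert [c.shape[0] for c in chunks] == [4, 2, 3]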
gowthambalusamy/pytorch_text2speech | [
"7637a3542799b9f6503a203ed1e4990245402cc0"
] | [
"deepvoice3_pytorch/modules.py"
] | [
"# coding: utf-8\n\nimport torch\nfrom torch import nn\nimport math\nimport numpy as np\nfrom torch.nn import functional as F\nfrom fairseq.models.fconv import Linear, LinearizedConvolution\n\n\ndef position_encoding_init(n_position, d_pos_vec, position_rate=1.0):\n ''' Init the sinusoid position encoding table '''\n\n # keep dim 0 for padding token position encoding zero vector\n position_enc = np.array([\n [position_rate * pos / np.power(10000, 2 * i / d_pos_vec) for i in range(d_pos_vec)]\n if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])\n\n position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i\n position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1\n return torch.from_numpy(position_enc).type(torch.FloatTensor)\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n m.weight.data.normal_(0, 0.01)\n return m\n\n\ndef Conv1d(in_channels, out_channels, kernel_size, dropout=0, std_mul=4.0, **kwargs):\n from .conv import Conv1d\n m = Conv1d(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef ConvTranspose1d(in_channels, out_channels, kernel_size, dropout=0,\n std_mul=1.0, **kwargs):\n m = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef LinearizedConv1d(in_channels, out_channels, kernel_size, dilation=(1,),\n std_mul=4.0, dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer optimized for decoding\"\"\"\n assert dilation[0] == 1\n m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m)\n\n\ndef ConvTBC(in_channels, out_channels, kernel_size, dilation=(1,), std_mul=4.0,\n dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer\"\"\"\n from fairseq.modules import ConvTBC\n assert dilation[0] == 1\n m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n m.weight.data.normal_(mean=0, std=std)\n m.bias.data.zero_()\n return nn.utils.weight_norm(m, dim=2)\n\n\nclass HighwayConv1d(nn.Module):\n \"\"\"Weight normzlized Conv1d + Highway network (support incremental forward)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size=1, padding=None,\n dilation=1, causal=False, dropout=0, std_mul=None, glu=False):\n super(HighwayConv1d, self).__init__()\n if std_mul is None:\n std_mul = 4.0 if glu else 1.0\n if padding is None:\n # no future time stamps available\n if causal:\n padding = (kernel_size - 1) * dilation\n else:\n padding = (kernel_size - 1) // 2 * dilation\n self.causal = causal\n self.dropout = dropout\n self.glu = glu\n\n self.conv = Conv1d(in_channels, 2 * out_channels,\n kernel_size=kernel_size, padding=padding,\n dilation=dilation, dropout=dropout,\n std_mul=std_mul)\n\n def forward(self, x):\n return self._forward(x, False)\n\n def incremental_forward(self, x):\n return self._forward(x, True)\n\n def _forward(self, x, is_incremental):\n \"\"\"Forward\n\n Args:\n x: 
(B, in_channels, T)\n returns:\n (B, out_channels, T)\n \"\"\"\n\n residual = x\n x = F.dropout(x, p=self.dropout, training=self.training)\n if is_incremental:\n splitdim = -1\n x = self.conv.incremental_forward(x)\n else:\n splitdim = 1\n x = self.conv(x)\n # remove future time steps\n x = x[:, :, :residual.size(-1)] if self.causal else x\n\n if self.glu:\n x = F.glu(x, dim=splitdim)\n return (x + residual) * math.sqrt(0.5)\n else:\n a, b = x.split(x.size(splitdim) // 2, dim=splitdim)\n T = F.sigmoid(b)\n return (T * a + (1 - T) * residual)\n\n def clear_buffer(self):\n self.conv.clear_buffer()\n\n\ndef get_mask_from_lengths(memory, memory_lengths):\n \"\"\"Get mask tensor from list of length\n Args:\n memory: (batch, max_time, dim)\n memory_lengths: array like\n \"\"\"\n mask = memory.data.new(memory.size(0), memory.size(1)).byte().zero_()\n for idx, l in enumerate(memory_lengths):\n mask[idx][:l] = 1\n return ~mask\n"
] | [
[
"torch.nn.functional.glu",
"torch.nn.functional.dropout",
"numpy.power",
"torch.nn.utils.weight_norm",
"numpy.cos",
"torch.nn.Embedding",
"numpy.sin",
"torch.from_numpy",
"torch.nn.functional.sigmoid",
"torch.nn.ConvTranspose1d",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
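`position_encoding_init` above fills the sinusoidal table with `pos / 10000^(2i/d)` per dimension `i`, applies sin to even and cos to odd columns, and keeps row 0 as the zero padding vector. An equivalent vectorized numpy version of the same loop (returns an ndarray rather than a torch tensor):

import numpy as np

def sinusoid_table(n_position, d_pos_vec, position_rate=1.0):
    pos = position_rate * np.arange(n_position, dtype=np.float64)[:, None]
    div = np.power(10000, 2 * np.arange(d_pos_vec) / d_pos_vec)
    table = pos / div                          # (n_position, d_pos_vec); row 0 is all zeros
    table[1:, 0::2] = np.sin(table[1:, 0::2])  # dim 2i
    table[1:, 1::2] = np.cos(table[1:, 1::2])  # dim 2i+1
    return table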
jdmoorman/clapsolver | [
"d3d2fa2bbb0722c78531ff8f0dc983a50e4b357f"
] | [
"benchmarks/utils.py"
] | [
"import random\n\nimport numpy as np\n\n\ndef uniform_matrix(shape, low=0.0, high=1.0):\n \"\"\"Generate a uniformly random matrix of the given shape.\"\"\"\n return np.random.uniform(low=low, high=high, size=shape)\n\n\ndef randint_matrix(shape, low=0, high=100):\n \"\"\"Generate a matrix of random integers of the given shape.\"\"\"\n return np.random.randint(low=low, high=high, size=shape)\n\n\ndef geometric_matrix(shape, low=0.0, high=1.0):\n \"\"\"Generate a geometric matrix of the given shape.\"\"\"\n n_rows, n_cols = shape\n A = np.random.uniform(low=low, high=high, size=(n_rows + n_cols,))\n B = np.random.uniform(low=low, high=high, size=(n_rows + n_cols,))\n\n A_mat = np.array(\n [[A[i] - A[n_rows + j] for j in range(n_cols)] for i in range(n_rows)]\n )\n B_mat = np.array(\n [[B[i] - B[n_rows + j] for j in range(n_cols)] for i in range(n_rows)]\n )\n\n return np.sqrt(A_mat ** 2 + B_mat ** 2) + 1.0\n\n\ndef machol_wien_matrix(shape):\n \"\"\"Generate a Machol-Wien matrix of the given shape.\"\"\"\n n_rows, n_cols = shape\n return np.array(\n [[i * j + 1 for j in range(1, n_cols + 1)] for i in range(1, n_rows + 1)]\n )\n\n\ndef random_machol_wien_matrix(shape, low=0.0, high=1.0):\n \"\"\"Generate a random Machol-Wien matrix of the given shape.\"\"\"\n n_rows, n_cols = shape\n return np.array(\n [\n [random.randint(1, i * j + 1) for j in range(1, n_cols + 1)]\n for i in range(1, n_rows + 1)\n ]\n )\n"
] | [
[
"numpy.random.uniform",
"numpy.sqrt",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
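The nested list comprehensions in `geometric_matrix` above build the pairwise differences one element at a time; the same matrices fall out of a broadcast subtraction. A sketch of the equivalent vectorized form:

import numpy as np

def geometric_matrix_vec(shape, low=0.0, high=1.0):
    n_rows, n_cols = shape
    A = np.random.uniform(low=low, high=high, size=n_rows + n_cols)
    B = np.random.uniform(low=low, high=high, size=n_rows + n_cols)
    dA = A[:n_rows, None] - A[None, n_rows:]  # (n_rows, n_cols)
    dB = B[:n_rows, None] - B[None, n_rows:]
    return np.sqrt(dA ** 2 + dB ** 2) + 1.0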
ccj5351/DAFStereoNets | [
"66b720a4abbac9097a794eacef034bab641771d9",
"66b720a4abbac9097a794eacef034bab641771d9",
"66b720a4abbac9097a794eacef034bab641771d9"
] | [
"src/loaddata/cityscapes_loader.py",
"src/baselines/DispNet/main_DispNet_v0.py",
"src/loaddata/pascal_voc_loader.py"
] | [
"# !/usr/bin/env python3\n# -*-coding:utf-8-*-\n# @file: cityscapes_loader.py\n# @brief:\n# @author: Changjiang Cai, [email protected], [email protected]\n# @version: 0.0.1\n# @creation date: 25-01-2020\n# @last modified: Sun 26 Jan 2020 01:59:45 AM EST\n\nimport json\nimport os\nfrom collections import namedtuple\nimport zipfile\n\nimport torch\nfrom torchvision.datasets.utils import verify_str_arg, iterable_to_str\nfrom torchvision.datasets.vision import VisionDataset\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torch.utils import data\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Based on https://github.com/mcordts/cityscapesScripts\nCityscapesClass = namedtuple('CityscapesClass', \n ['name', 'id', 'train_id', 'category', 'category_id',\n 'has_instances', 'ignore_in_eval', 'color'])\n\ncityscapes_labels = [\n # name, id, trainId, category, catId, hasInstances, ignoreInEval color\n CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),\n CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),\n CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),\n CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),\n CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),\n CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),\n CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),\n CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),\n CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),\n CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),\n CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),\n CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),\n CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),\n CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),\n CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),\n CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),\n CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),\n CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),\n CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),\n CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),\n CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),\n CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),\n CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),\n CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),\n CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),\n CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),\n CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),\n CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),\n CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),\n CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),\n CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),\n CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),\n CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),\n 
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),\n CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),\n]\n\n\ndef get_cityscapes_labels():\n \"\"\"Load the mapping that associates cityscapes classes with label colors\n\n Returns:\n np.ndarray with dimensions (34, 3)\n \"\"\"\n n_classes = len(cityscapes_labels) - 1 # remove the id=-1;\n assert (n_classes == 34)\n cs_clr_labels = np.zeros((n_classes, 3), np.uint8)\n for label in cityscapes_labels:\n if label.id >= 0:\n cs_clr_labels[label.id] = label.color\n\n return cs_clr_labels\n\ndef encode_cityscapes_segmap(mask):\n \"\"\"Encode segmentation label images as cityscapes classes\n\n Args:\n mask (np.ndarray): raw segmentation label image of dimension\n (M, N, 3), in which the cityscapes classes are encoded as colors.\n\n Returns:\n (np.ndarray): class map with dimensions (M,N), where the value at\n a given location is the integer denoting the class index.\n \"\"\"\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(get_cityscapes_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask\n\ndef decode_cityscapes_segmap(label_mask, plot=False):\n \"\"\"Decode segmentation class labels into a color image\n\n Args:\n label_mask (np.ndarray): an (M,N) array of integer values denoting\n the class label at each spatial location.\n plot (bool, optional): whether to show the resulting color image\n in a figure.\n\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n label_colors = get_cityscapes_labels()\n #print (\"[???] label_colors shape = \", label_colors.shape)\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n n_classes = 34\n for ll in range(0, n_classes):\n r[label_mask == ll] = label_colors[ll, 0]\n g[label_mask == ll] = label_colors[ll, 1]\n b[label_mask == ll] = label_colors[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n #print ('[???] rgb shape = ', rgb.shape)\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb\n\ndef cityscape_batch_label2RGB(batch_label_mask):\n \"\"\"Decode segmentation class labels into a color image\n\n Args:\n batch_label_mask (np.ndarray): an (N, H, W) or (N,H,W,1), or (N,1,H,W) \n array of integer values denoting the class \n label at each spatial location.\n\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n #print (\"[???] batch_label_mask shape = \", batch_label_mask.shape )\n if batch_label_mask.shape[-1] == 1:\n channel_dim = -1\n elif batch_label_mask.shape[1] == 1:\n channel_dim = 1\n\n indices = np.squeeze(batch_label_mask, axis=channel_dim).astype(np.int32) # in shape [N, H, W]\n\n\n N, H, W = indices.shape[:]\n #print (\"[???] indices.shape = \", indices.shape)\n \n batch_label_rgb = np.zeros((N,H,W,3))\n #get cityscape colormap\n cmap = get_cityscapes_labels() # in shape (34, 3)\n #print ('[???] 
camp shape = ', cmap.shape)\n \"\"\" deal with invalid label values due to data augmentation \"\"\"\n #NOTE: due to data augmentation to label and image, e.g., random rotation,\n # so somewhere will have invalid label values (i.e., label > num_classes = 34),\n # e.g., after augmentation, label = 250, which is in valid, due to 250 > 34;\n # so we have to take this case into consideration !!!\n indices[indices >= 34] = 0\n for j in range(N):\n \n #NOTE:assert already verified!!!\n #tmp_max = np.amax(indices[j])\n #assert tmp_max < 34, 'batch_idx = {}, max_value_index = {}, max_val = {}'.format(\n # j, np.unravel_index(np.argmax(indices[j], axis=None), indices[j].shape), tmp_max)\n \n # gather\n batch_label_rgb[j,:,:,0] = np.take(cmap[:,0], indices[j]) / 255 # Red\n batch_label_rgb[j,:,:,1] = np.take(cmap[:,1], indices[j]) / 255 # Green\n batch_label_rgb[j,:,:,2] = np.take(cmap[:,2], indices[j]) / 255 # Blue\n \n return batch_label_rgb\n\n\n# > this code is adopted from TORCHVISION.DATASETS.CITYSCAPES, at \n# https://pytorch.org/docs/stable/_modules/torchvision/datasets/cityscapes.html;\n\nclass CityscapesLoader(data.Dataset):\n \"\"\"`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory ``leftImg8bit``\n and ``gtFine`` or ``gtCoarse`` are located.\n split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode=\"gtFine\"\n otherwise ``train``, ``train_extra`` or ``val``\n mode (string, optional): The quality mode to use, ``gtFine`` or ``gtCoarse``\n target_type (string): Type of target to use, ``instance``, ``semantic``, ``polygon``\n or ``color``. \n\n Examples:\n\n Get semantic segmentation target\n\n .. code-block:: python\n\n dataset = Cityscapes('./data/cityscapes', split='train', mode='fine',\n target_type='semantic')\n\n img, smnt = dataset[0]\n \n\n Validate on the \"coarse\" set\n\n .. code-block:: python\n\n dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse',\n target_type='semantic')\n\n img, smnt = dataset[0]\n \"\"\"\n\n \n def __init__(self, \n root, \n split='train', \n mode='fine', \n target_type='semantic',\n is_transform= True,\n img_size=161, # 512\n augmentations=None,\n ):\n super(CityscapesLoader, self).__init__()\n self.root = root\n self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse'\n self.images_dir = os.path.join(self.root, 'leftImg8bit', split)\n self.targets_dir = os.path.join(self.root, self.mode, split)\n \n self.split = split\n self.images = []\n self.targets = []\n \n self.augmentations = augmentations\n self.is_transform = is_transform\n self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)\n self.tf = transforms.Compose(\n [\n #ToTensor(): Converts a PIL Image or numpy.ndarray (H x W x C) \n # in the range [0, 255] to a torch.FloatTensor of \n # shape (C x H x W) in the range [0.0, 1.0] if the \n # PIL Image belongs to one of the modes (L, LA, P, I, \n # F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray \n # has dtype = np.uint8\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n ) \n\n verify_str_arg(mode, \"mode\", (\"fine\", \"coarse\"))\n if mode == \"fine\":\n valid_modes = (\"train\", \"test\", \"val\")\n else:\n valid_modes = (\"train\", \"train_extra\", \"val\")\n msg = (\"Unknown value '{}' for argument split if mode is '{}'. 
\"\n \"Valid values are {{{}}}.\")\n msg = msg.format(split, mode, iterable_to_str(valid_modes))\n verify_str_arg(split, \"split\", valid_modes, msg)\n\n #if not isinstance(target_type, list):\n # self.target_type = [target_type]\n #else:\n # self.target_type = target_type\n #[verify_str_arg(value, \"target_type\",\n # (\"instance\", \"semantic\", \"polygon\", \"color\")) for value in self.target_type]\n self.target_type = target_type\n \n for city in os.listdir(self.images_dir):\n img_dir = os.path.join(self.images_dir, city)\n target_dir = os.path.join(self.targets_dir, city)\n for file_name in os.listdir(img_dir):\n target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],\n self._get_target_suffix(self.mode, self.target_type))\n self.targets.append(os.path.join(target_dir, target_name))\n self.images.append(os.path.join(img_dir, file_name))\n \n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is a tuple of all target types if target_type is a list with more\n than one item. Otherwise target is a json object if target_type=\"polygon\", else the image segmentation.\n \"\"\"\n #print (\"[???] loading \", self.images[index], ';', self.targets[index])\n image = Image.open(self.images[index]).convert('RGB')\n\n if self.target_type == 'polygon':\n target = self._load_json(self.targets[index])\n else:\n target = Image.open(self.targets[index])\n \n if self.augmentations is not None:\n image, target = self.augmentations(image, target)\n if self.is_transform:\n image, target = self.my_transform(image, target)\n\n # change [H, W] to [C = 1, H, W]\n #print (\"[***] expend lable shape : \", target.shape)\n target = torch.unsqueeze(target, 0)\n return image, target\n\n\n # added by CCJ:\n def my_transform(self, img, lbl):\n if self.img_size == (\"same\", \"same\"):\n pass\n else:\n img = img.resize((self.img_size[0], self.img_size[1]))# uint8 with RGB mode\n lbl = lbl.resize((self.img_size[0], self.img_size[1]))\n \n # do transform: totensor, and normalization\n img = self.tf(img) # in shape (C x H x W), in the range [0.0, 1.0];\n lbl = torch.from_numpy(np.array(lbl)).float()\n return img, lbl\n \n def __len__(self):\n return len(self.images)\n\n def extra_repr(self):\n lines = [\"Split: {split}\", \"Mode: {mode}\", \"Type: {target_type}\"]\n return '\\n'.join(lines).format(**self.__dict__)\n\n def _load_json(self, path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n\n def _get_target_suffix(self, mode, target_type):\n if target_type == 'instance':\n return '{}_instanceIds.png'.format(mode)\n elif target_type == 'semantic':\n return '{}_labelIds.png'.format(mode)\n elif target_type == 'color':\n return '{}_color.png'.format(mode)\n else:\n return '{}_polygons.json'.format(mode)\n\n# Leave code for debugging purposes\nif __name__ == \"__main__\":\n #dummy main\n import src.augmentations as aug\n from six.moves import input\n bs = 2\n prob = 0.5\n augs = aug.Compose([aug.RandomRotate(10), aug.RandomHorizontallyFlip(prob)])\n #augs = None\n dst = CityscapesLoader(\n root = '/media/ccjData2/datasets/cityscapes/', \n split='train', \n mode='fine', \n target_type ='semantic',\n is_transform= True,\n img_size= 161, # 512\n augmentations= augs)\n trainloader = data.DataLoader(dst, batch_size=bs)\n for i, data in enumerate(trainloader):\n imgs, labels = data\n imgs = imgs.numpy()[:, ::-1, :, :]\n imgs = np.transpose(imgs, [0,2,3,1])\n\n #print ('imgs, labels: ', imgs.shape, labels.shape)\n if 0:\n f, 
axarr = plt.subplots(bs, 2)\n for j in range(bs):\n axarr[j][0].imshow(imgs[j])\n axarr[j][1].imshow(decode_cityscapes_segmap(\n np.squeeze(labels.numpy()[j], axis = 0)\n ))\n plt.show()\n a = input()\n if a == 'ex':\n break\n else:\n plt.close()\n if 1:\n print ('{}/{}'.format(i, len(trainloader)))\n batch_label_rgb = cityscape_batch_label2RGB(labels.numpy())\n",
"# !/usr/bin/env python3\n# -*-coding:utf-8-*-\n# @file: main_GCNet.py\n# @brief:\n# @author: Changjiang Cai, [email protected], [email protected]\n# @version: 0.0.1\n# @creation date: 07-01-2020\n# @last modified: Mon 20 Jan 2020 03:09:30 AM EST\n\nfrom __future__ import print_function\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport sys\nimport shutil\nimport os\nfrom os.path import join as pjoin\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport cv2\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nfrom .loaddata.data import get_training_set, get_valid_set, load_test_data, test_transform\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom src.dispColor import colormap_jet_batch_image,KT15FalseColorDisp,KT15LogColorDispErr\n\n#from src.utils import writeKT15FalseColors # this is numpy fuction, it is SO SLOW !!!\n# this is cython fuction, it is SO QUICK !!!\nfrom src.cython import writeKT15FalseColor as KT15FalseClr\nfrom src.cython import writeKT15ErrorLogColor as KT15LogClr\nimport numpy as np\nimport src.pfmutil as pfm\nimport time\nfrom .models.loss import valid_accu3, MyLoss2\nfrom .models.dispnet import DispNet, scale_pyramid\n\n\n\"\"\" train and test DispNet \"\"\"\nclass MyDispNet(object):\n def __init__(self, args):\n self.args = args\n self.model_name = args.model_name\n if str(self.model_name).lower() == 'dispnets': # DispNetS\n self.is_corr = False\n elif str(self.model_name).lower() == 'dispnetc': # DispNetC\n self.is_corr = True\n else:\n raise Exception(\"No suitable model name found ... Should be DispNetS or DispNetC\")\n\n self.lr = args.lr\n self.kitti2012 = args.kitti2012\n self.kitti2015 = args.kitti2015\n self.checkpoint_dir = args.checkpoint_dir\n self.log_summary_step = args.log_summary_step\n self.isTestingMode = (str(args.mode).lower() == 'test')\n self.cuda = args.cuda\n self.weights1To6 = [6.0, 5.0, 4.0, 3.0, 2.0, 1.0]\n # if KT fine-tuning, just use the loss of the last output disp1\n if self.kitti2012 or self.kitti2015:\n self.weights1To6 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n self.weights1To6 = [i/np.sum(self.weights1To6) for i in self.weights1To6]\n print(\"[***] weights1To6 : \", self.weights1To6)\n \n if not self.isTestingMode: # training mode\n print('===> Loading datasets')\n train_set = get_training_set(args.data_path, args.training_list, \n [args.crop_height, args.crop_width], \n args.kitti2012, args.kitti2015, args.shift, False# is_semantic\n )\n \n self.training_data_loader = DataLoader(dataset=train_set, \n num_workers=args.threads, batch_size=args.batchSize, \n shuffle=True, drop_last=True)\n \n self.train_loader_len = len(self.training_data_loader)\n self.criterion = MyLoss2(thresh=3, alpha=2)\n\n \n print('===> Building {} Model'.format(self.model_name))\n # due to TWO consecutive downsampling, so here e.g., maxdisp=48,\n # actually means 4*maxdisp=192 in the original input image pair;\n # that is why we pass args.max_disp//4 to construct DispNet;\n self.model = DispNet(\n is_corr = self.is_corr,\n maxdisp = args.max_disp//4,\n corr_func_type = args.corr_func,\n is_bn = (str(args.is_bn).lower() == 'true'),\n is_relu = (str(args.is_relu).lower() == 'true'),\n )\n if self.cuda:\n self.model = torch.nn.DataParallel(self.model).cuda()\n \n if not self.isTestingMode: # training mode\n \"\"\" We need to set requires_grad == False to freeze the parameters \n so that the gradients are not computed in backward();\n Parameters of newly constructed modules have 
requires_grad=True by default;\n \"\"\"\n # updated for the cases where some subnetwork was forzen!!!\n params_to_update = [p for p in self.model.parameters() if p.requires_grad]\n if 0:\n print ('[****] params_to_update = ')\n for p in params_to_update:\n print (type(p.data), p.size())\n\n print('[***]Number of model parameters: {}'.format(sum([p.data.nelement() for p in self.model.parameters()])))\n \n #NOTE: added by CCJ on Jan. 12, 2020; \n \"\"\" If you need to move a model to GPU via .cuda(), please do so \n before constructing optimizers for it. \n Parameters of a model after .cuda() will be different \n objects with those before the call. \n \"\"\"\n self.optimizer= optim.Adam(params_to_update, lr = args.lr, betas=(0.9,0.999))\n self.writer = SummaryWriter(args.train_logdir)\n \n\n if self.isTestingMode:\n assert os.path.isfile(args.resume) == True, \"Model Test but NO checkpoint found at {}\".format(args.resume)\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"[***] => loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n self.model.load_state_dict(checkpoint['state_dict'], strict=False)\n if not self.isTestingMode: # training mode\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n else:\n print(\"=> no checkpoint found at {}\".format(args.resume))\n \n \n def save_checkpoint(self, epoch, state_dict, is_best=False):\n saved_checkpts = pjoin(self.checkpoint_dir, self.model_name)\n if not os.path.exists(saved_checkpts):\n os.makedirs(saved_checkpts)\n print ('makedirs {}'.format(saved_checkpts))\n \n filename = pjoin(saved_checkpts, \"model_epoch_%05d.tar\" % epoch)\n torch.save(state_dict, filename)\n print ('Saved checkpoint at %s' % filename) \n if is_best:\n best_fname = pjoin(saved_checkpts, 'model_best.tar')\n shutil.copyfile(filename, best_fname)\n\n def adjust_learning_rate(self, epoch):\n if epoch <= 200:\n self.lr = self.args.lr\n else:\n self.lr = self.args.lr * 0.1\n \n print('learning rate = ', self.lr)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.lr\n\n def load_checkpts(self, saved_checkpts = ''):\n print(\" [*] Reading checkpoint %s\" % saved_checkpts)\n \n checkpoint = None\n if saved_checkpts and saved_checkpts != '':\n try: #Exception Handling\n f = open(saved_checkpts, 'rb')\n except IsADirectoryError as error:\n print (error)\n else:\n checkpoint = torch.load(saved_checkpts)\n return checkpoint\n\n def build_train_summaries(self, imgl, imgr, disp, disp_gt, global_step, loss, epe_err, \n epe_err1 = None, epe_err2 = None, is_KT15Color = False):\n \"\"\" loss and epe error \"\"\"\n self.writer.add_scalar(tag = 'train_loss', scalar_value = loss, global_step = global_step)\n self.writer.add_scalar(tag = 'train_err_full', scalar_value = epe_err, global_step = global_step)\n if epe_err1 is not None:\n self.writer.add_scalar(tag = 'train_err1_half', scalar_value = epe_err1, global_step = global_step)\n if epe_err2 is not None:\n self.writer.add_scalar(tag = 'train_err2_quarter', scalar_value = epe_err2, global_step = global_step)\n \n \"\"\" Add batched image data to summary:\n Note: add_images(img_tensor): img_tensor could be torch.Tensor, numpy.array, or string/blobname;\n so we could use torch.Tensor or numpy.array !!!\n \"\"\"\n self.writer.add_images(tag='train_imgl',img_tensor=imgl, global_step = global_step, dataformats='NCHW')\n if imgr is not None:\n self.writer.add_images(tag='train_imgr',img_tensor=imgr, global_step = global_step, dataformats='NCHW')\n \n with 
torch.set_grad_enabled(False):\n if is_KT15Color:\n disp_tmp = KT15FalseColorDisp(disp)\n disp_gt_tmp = KT15FalseColorDisp(disp_gt)\n else:\n disp_tmp = colormap_jet_batch_image(disp)\n disp_gt_tmp = colormap_jet_batch_image(disp_gt)\n\n self.writer.add_images(tag='train_disp', img_tensor=disp_tmp, global_step = global_step, dataformats='NHWC')\n self.writer.add_images(tag='train_dispGT',img_tensor=disp_gt_tmp, global_step = global_step, dataformats='NHWC')\n self.writer.add_images(tag='train_dispErr',img_tensor=KT15LogColorDispErr(disp, disp_gt), \n global_step = global_step, dataformats='NHWC')\n \n\n\n #---------------------\n #---- Training -------\n #---------------------\n def train(self, epoch):\n \"\"\"Set up TensorBoard \"\"\"\n epoch_loss = 0\n epoch_epe = 0\n epoch_accu3 = 0\n valid_iteration = 0\n\n # setting to train mode;\n self.model.train()\n self.adjust_learning_rate(epoch)\n\n \"\"\" running log loss \"\"\"\n log_running_loss = 0.0\n log_running_err = 0.0\n log_running_err1 = 0.0\n log_running_err2 = 0.0\n \n for iteration, batch_data in enumerate(self.training_data_loader):\n start = time.time()\n #print (\" [***] iteration = %d\" % iteration)\n input1 = batch_data[0].float() # False by default;\n #print (\"[???] input1 require_grad = \", input1.requires_grad) # False\n input2 = batch_data[1].float()\n target = batch_data[2].float()\n left_rgb = batch_data[3].float()\n #right_rgb = batch_data[4].float()\n \n if self.cuda:\n input1 = input1.cuda()\n input2 = input2.cuda()\n target = target.cuda()\n\n target = torch.squeeze(target,1)\n N,H,W = target.size()[:]\n # valid pixels: 0 < disparity < max_disp\n mask = (target - args.max_disp)*target < 0\n mask.detach_()\n\n num_scales = 6\n #NOTE: Updated by CCJ on 2020/01/17, adding arg `is_value_scaled=True`;\n # 1) not only down-sample target in space for target1, \n # 2) but also divide the disparity value by 2, due to down-sampled image width;\n target_pyramid = scale_pyramid(target.view(N,1,H,W), num_scales, is_value_scaled=True)\n target_pyramid = [ torch.squeeze(t, 1) for t in target_pyramid ]\n mask_pyramid = []\n for i in range(num_scales):\n ratio = 2**(i+1)\n tmp_target = target_pyramid[i]\n tmp_mask = (tmp_target - args.max_disp//ratio)*tmp_target < 0\n tmp_mask.detach_()\n mask_pyramid.append(tmp_mask)\n\n valid_disp = target[mask].size()[0]\n \n if valid_disp > 0:\n self.optimizer.zero_grad()\n # including disp0, disp1, ..., disp6\n #NOTE: disp0 is in original image size, disp1 is in half size, and so on!!!\n disp_results = self.model(input1, input2)\n disp0 = disp_results[0] # in original size\n disp1 = disp_results[1] # in half size \n disp2 = disp_results[2] # in quarter size \n target1 = target_pyramid[0] # in half size\n mask1 = mask_pyramid[0]\n target2 = target_pyramid[1] # in quarter size\n mask2 = mask_pyramid[1]\n \n loss = F.smooth_l1_loss(disp0[mask], target[mask], reduction='mean')\n for i in range(1,6):\n loss += self.weights1To6[i]*F.l1_loss(disp_results[i+1][mask_pyramid[i]], \n target_pyramid[i][mask_pyramid[i]], reduction='mean')\n\n if self.kitti2012 or self.kitti2015:\n loss = 0.4*loss + 0.6*self.criterion(disp0[mask], target[mask])\n \n loss.backward()\n self.optimizer.step()\n # MAE error\n error = torch.mean(torch.abs(disp0[mask] - target[mask]))\n # in 1/2 size\n error1 = torch.mean(torch.abs(disp1[mask1] - target1[mask1]))\n # in 1/4 size\n error2 = torch.mean(torch.abs(disp2[mask2] - target2[mask2]))\n\n # accu3, in original size\n accu = valid_accu3(target[mask], disp0[mask])\n\n 
epoch_loss += loss.item()\n epoch_epe += error.item()\n epoch_accu3 += accu.item() \n valid_iteration += 1\n \n # epoch - 1: here argument `epoch` is starting from 1, instead of 0 (zer0);\n train_global_step = (epoch-1)*self.train_loader_len + iteration \n print(\"===> Epoch[{}]({}/{}): Step {}, Loss: {:.3f}, EPE: {:.2f}, Acu3: {:.2f}; {:.2f} s/step\".format(\n epoch, iteration, self.train_loader_len, train_global_step,\n loss.item(), error.item(), accu.item(), time.time() -start))\n sys.stdout.flush()\n\n # save summary for tensorboard visualization\n log_running_loss += loss.item()\n log_running_err += error.item()\n log_running_err1 += error1.item()\n log_running_err2 += error2.item()\n \n if iteration % self.log_summary_step == (self.log_summary_step - 1):\n self.build_train_summaries( \n F.interpolate(left_rgb, size=[H//2, W//2], mode='bilinear', align_corners = True), \n #left_rgb,\n None, #right_rgb,\n # in the latest versions of PyTorch you can add a new axis by indexing with None \n # > see: https://discuss.pytorch.org/t/what-is-the-difference-between-view-and-unsqueeze/1155;\n #torch.unsqueeze(disp0, dim=1) ==> disp0[:,None]\n disp1[:,None], target1[:,None],\n train_global_step, \n log_running_loss/self.log_summary_step, \n log_running_err /self.log_summary_step, \n log_running_err1/self.log_summary_step, \n log_running_err2/self.log_summary_step, \n is_KT15Color = False\n #is_KT15Color = True\n )\n # reset to zeros\n log_running_loss = 0.0\n log_running_err = 0.0\n log_running_err1 = 0.0\n log_running_err2 = 0.0\n\n \n # end of data_loader\n # save the checkpoints\n avg_loss = epoch_loss / valid_iteration\n avg_err = epoch_epe / valid_iteration\n avg_accu = epoch_accu3 / valid_iteration\n print(\"===> Epoch {} Complete: Avg. Loss: {:.4f}, Avg. 
EPE Error: {:.4f}, Accu3: {:.4f})\".format(\n epoch, avg_loss, avg_err, avg_accu))\n\n is_best = False\n model_state_dict = {\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer' : self.optimizer.state_dict(),\n 'loss': avg_loss,\n 'epe_err': avg_err, \n 'accu3': avg_accu\n }\n\n if self.kitti2012 or self.kitti2015:\n #if epoch % 50 == 0 and epoch >= 300:\n #if epoch % 50 == 0:\n if epoch % 25 == 0:\n self.save_checkpoint(epoch, model_state_dict, is_best)\n else:\n #if epoch >= 7:\n # self.save_checkpoint(epoch, model_state_dict, is_best)\n self.save_checkpoint(epoch, model_state_dict, is_best)\n # avg loss\n return avg_loss, avg_err, avg_accu\n\n\n #---------------------\n #---- Test ----- -----\n #---------------------\n def test(self):\n self.model.eval()\n file_path = self.args.data_path\n file_list = self.args.test_list\n f = open(file_list, 'r')\n filelist = [l.rstrip() for l in f.readlines()]\n crop_width = self.args.crop_width\n crop_height = self.args.crop_height\n\n if not os.path.exists(self.args.resultDir):\n os.makedirs(self.args.resultDir)\n print ('makedirs {}'.format(self.args.resultDir))\n \n avg_err = 0\n avg_rate = 0\n for index in range(len(filelist)):\n #for index in range(1):\n current_file = filelist[index]\n if self.kitti2015:\n leftname = pjoin(file_path, 'image_0/' + current_file)\n if index % 20 == 0:\n print (\"limg: {}\".format(leftname))\n rightname = pjoin(file_path, 'image_1/' + current_file)\n dispname = pjoin(file_path, 'disp_occ_0_pfm/' + current_file[0:-4] + '.pfm')\n if os.path.isfile(dispname):\n dispGT = pfm.readPFM(dispname)\n dispGT[dispGT == np.inf] = .0\n else:\n dispGT= None\n savename = pjoin(self.args.resultDir, current_file[0:-4] + '.pfm')\n \n elif self.kitti2012:\n #leftname = pjoin(file_path, 'image_0/' + current_file)\n leftname = pjoin(file_path, 'colored_0/' + current_file)\n rightname = pjoin(file_path, 'colored_1/' + current_file)\n dispname = pjoin(file_path, 'disp_occ_0_pfm/' + current_file[0:-4] + '.pfm')\n if os.path.isfile(dispname):\n dispGT = pfm.readPFM(dispname)\n dispGT[dispGT == np.inf] = .0\n else:\n dispGT= None\n savename = pjoin(self.args.resultDir, current_file[0:-4] + '.pfm')\n #disp = Image.open(dispname)\n #disp = np.asarray(disp) / 256.0\n\n else:\n A = current_file\n leftname = pjoin(file_path, A)\n rightname = pjoin(file_path, A[:-13] + 'right/' + A[len(A)-8:]) \n # check disparity GT exists or not!!!\n pos = A.find('/')\n tmp_len = len('frames_finalpass')\n dispname = pjoin(file_path, A[0:pos] + '/disparity' + A[pos+1+tmp_len:-4] + '.pfm')\n #print (\"[****] ldisp: {}\".format(dispname))\n if os.path.isfile(dispname):\n dispGT = pfm.readPFM(dispname)\n dispGT[dispGT == np.inf] = .0\n else:\n dispGT= None\n savename = pjoin(self.args.resultDir, str(index) + '.pfm')\n\n input1, input2, height, width = test_transform(\n load_test_data(leftname, rightname), crop_height, crop_width)\n \n if self.cuda:\n input1 = input1.cuda()\n input2 = input2.cuda()\n with torch.no_grad():\n prediction1, prediction = self.model(input1, input2)\n \n disp = prediction.cpu().detach().numpy() # in full size\n disp1 = prediction1.cpu().detach().numpy() # in half size\n if height <= crop_height and width <= crop_width:\n disp = disp[0, crop_height - height: crop_height, crop_width-width: crop_width]\n disp1 = disp1[0, crop_height//2 - height//2: crop_height//2, crop_width//2-width//2: crop_width//2]\n else:\n disp = disp[0, :, :]\n disp1 = disp1[0,:,:]\n \n #skimage.io.imsave(savename, (disp * 256).astype('uint16'))\n 
if self.kitti2015 or self.kitti2012 or index %50 == 0:\n pfm.save(savename, disp)\n #print ('saved ', savename)\n if 0 and dispGT is not None:\n left = np.asarray(Image.open(leftname))[:,:,:3].astype(np.float32) \n right = np.asarray(Image.open(rightname))[:,:,:3].astype(np.float32)\n #print (\"[???]\", left.shape)\n pfm.save(savename[:-4] + '-iml.pfm', left)\n pfm.save(savename[:-4] + '-imr.pfm', right)\n pfm.save(savename[:-4] + '-gt.pfm', dispGT.astype(np.float32))\n pfm.save(savename[:-4] + '-half.pfm', disp1)\n \n if dispGT is not None:\n error, rate = get_epe_rate(dispGT, disp, self.args.max_disp, \n self.args.threshold)\n avg_err += error\n avg_rate += rate\n if index % 20 == 0:\n print(\"===> Frame {}: \".format(index) + leftname + \" ==> EPE Error: {:.4f}, Bad-{:.1f} Error: {:.4f}\".format(\n error, self.args.threshold, rate))\n \n # save kt15 color\n if self.kitti2015:\n \"\"\" disp \"\"\"\n tmp_dir = pjoin(self.args.resultDir, \"dispColor\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n tmp_dispname = pjoin(tmp_dir, current_file[0:-4] + '.png')\n cv2.imwrite(tmp_dispname, \n KT15FalseClr.writeKT15FalseColor(np.ascontiguousarray(disp)).astype(np.uint8)[:,:,::-1])\n if index % 20 == 0:\n print ('saved ', tmp_dispname)\n if dispGT is not None: #If KT benchmark submission, then No dispGT;\n \"\"\" err-disp \"\"\"\n tmp_dir = pjoin(self.args.resultDir, \"errDispColor\")\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n tmp_errdispname = pjoin(tmp_dir, current_file[0:-4] + '.png')\n cv2.imwrite(tmp_errdispname, \n KT15LogClr.writeKT15ErrorDispLogColor(np.ascontiguousarray(disp), np.ascontiguousarray(dispGT)).astype(np.uint8)[:,:,::-1])\n if index % 20 == 0:\n print ('saved ', tmp_errdispname)\n\n if dispGT is not None:\n avg_err = avg_err / len(filelist)\n avg_rate = avg_rate / len(filelist)\n print(\"===> Total {} Frames ==> AVG EPE Error: {:.4f}, AVG Bad-{:.1f} Error: {:.4f}\".format(\n len(filelist), avg_err, self.args.threshold, avg_rate))\n\n\n\n\ndef get_epe_rate(disp, prediction, max_disp = 192, threshold = 3.0):\n mask = np.logical_and(disp >= 0.001, disp <= max_disp)\n error = np.mean(np.abs(prediction[mask] - disp[mask]))\n rate = np.sum(np.abs(prediction[mask] - disp[mask]) > threshold) / np.sum(mask)\n #print(\" ==> EPE Error: {:.4f}, Error Rate: {:.4f}\".format(error, rate))\n return error, rate\n\n\n\ndef main(args):\n if not os.path.exists(args.checkpoint_dir):\n os.makedirs(args.checkpoint_dir)\n \n #----------------------------\n # some initilization thing \n #---------------------------\n cuda = args.cuda\n if cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n torch.manual_seed(args.seed)\n if cuda:\n torch.cuda.manual_seed(args.seed)\n \n myNet = MyDispNet(args)\n \"\"\" for debugging \"\"\"\n if args.mode == 'debug':\n myNet.model.train()\n import gc\n crop_h = 256\n crop_w = 512\n #x_l = torch.randn((1, 3, crop_h, crop_w), requires_grad=True)\n #x_r = torch.randn((1, 3, crop_h, crop_w), requires_grad=True)\n x_l = torch.randn((1, 3, crop_h, crop_w)).cuda()\n x_r = torch.randn((1, 3, crop_h, crop_w)).cuda()\n y = torch.randn((1, crop_h, crop_w)).cuda()\n z = torch.randn((1, 1, crop_h//3, crop_w//3)).cuda()\n\n \n from pytorch_memlab import profile, MemReporter\n # pass in a model to automatically infer the tensor names\n # You can better understand the memory layout for more complicated module\n if 1:\n reporter = MemReporter(myNet.model)\n disp = myNet.model(x_l, x_r)\n loss = 
F.smooth_l1_loss(disp, y, reduction='mean')\n            reporter.report(verbose=True)\n            print('========= before backward =========')\n            loss.backward()\n            reporter.report(verbose=True)\n\n        # generate prof which can be loaded by Google chrome trace at chrome://tracing/\n        if 1:\n            with torch.autograd.profiler.profile(use_cuda=True) as prof:\n                myNet.model(x_l, x_r)\n            print(prof)\n            prof.export_chrome_trace('./results/tmp/prof.out')\n    \n    if args.mode == 'train':\n        print('start training !!!')\n        for epoch in range(1 + args.startEpoch, args.startEpoch + args.nEpochs + 1):\n            print (\"[**] do training at epoch %d/%d\" % (epoch, args.startEpoch + args.nEpochs))\n\n            with torch.autograd.set_detect_anomaly(True):\n                avg_loss, avg_err, avg_accu = myNet.train(epoch)\n            # save the last epoch always!!\n            myNet.save_checkpoint(args.nEpochs + args.startEpoch,\n                {\n                    'epoch': args.nEpochs + args.startEpoch,\n                    'state_dict': myNet.model.state_dict(),\n                    'optimizer' : myNet.optimizer.state_dict(),\n                    'loss': avg_loss,\n                    'epe_err': avg_err, \n                    'accu3': avg_accu\n                }, \n                is_best = False)\n            print('done training !!!')\n    \n    if args.mode == 'test': \n        print('start testing !!!')\n        myNet.test()\n\n\nif __name__ == '__main__':\n    \n    import argparse\n    # Training settings\n    parser = argparse.ArgumentParser(description='PyTorch GANet Example')\n    parser.add_argument('--crop_height', type=int, required=True, help=\"crop height\")\n    parser.add_argument('--max_disp', type=int, default=192, help=\"max disp\")\n    parser.add_argument('--crop_width', type=int, required=True, help=\"crop width\")\n    parser.add_argument('--resume', type=str, default='', help=\"resume from saved model\")\n    parser.add_argument('--batchSize', type=int, default=1, help='training batch size')\n    parser.add_argument('--log_summary_step', type=int, default=200, help='every 200 steps to build training summary')\n    parser.add_argument('--nEpochs', type=int, default=400, help='number of epochs to train for')\n    parser.add_argument('--startEpoch', type=int, default=0, help='starting point, used for fine-tuning')\n    parser.add_argument('--lr', type=float, default=0.001, help='Learning Rate. Default=0.001')\n    parser.add_argument('--cuda', type=int, default=1, help='use cuda? Default=True')\n    parser.add_argument('--threads', type=int, default=1, help='number of threads for data loader to use')\n    parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\n    parser.add_argument('--shift', type=int, default=0, help='random shift of left image. Default=0')\n    parser.add_argument('--kitti2012', type=int, default=0, help='kitti 2012 dataset? Default=False')\n    parser.add_argument('--kitti2015', type=int, default=0, help='kitti 2015? 
Default=False')\n parser.add_argument('--data_path', type=str, default='/data/ccjData', help=\"data root\")\n parser.add_argument('--training_list', type=str, default='./lists/sceneflow_train.list', help=\"training list\")\n parser.add_argument('--test_list', type=str, default='./lists/sceneflow_test_select.list', help=\"evaluation list\")\n parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint/', help=\"location to save models\")\n parser.add_argument('--train_logdir', dest='train_logdir', default='./logs/tmp', help='log dir')\n \"\"\"Arguments related to run mode\"\"\"\n parser.add_argument('--model_name', type=str, default='DispNetC', help=\"model name\")\n parser.add_argument('--mode', dest='mode', type = str, default='train', help='train, test')\n parser.add_argument('--resultDir', type=str, default= \"./results\")\n parser.add_argument('--is_bn', dest='is_bn', type = str, default='true', help='using BN or not')\n parser.add_argument('--is_relu', dest='is_relu', type = str, default='true', help='using ReLU or not')\n parser.add_argument('--corr_func', dest='corr_func', type = str, default='correlation1D_map_V1', help='corr1D function type')\n parser.add_argument('--threshold', type=float, default=3.0, help=\"threshold of error rates\")\n\n args = parser.parse_args()\n print('[***] args = ', args)\n main(args)\n",
"# !/usr/bin/env python3\n# -*-coding:utf-8-*-\n# @file: pascal_voc_loader.py\n# @brief:\n# @author: Changjiang Cai, [email protected], [email protected]\n# @version: 0.0.1\n# @creation date: 23-09-2019\n# @last modified: Fri 01 Nov 2019 03:36:15 PM EDT\n\n\"\"\" coded adapted from https://github.com/meetshah1995/pytorch-semseg/blob/master/ptsemseg/loader/pascal_voc_loader.py \"\"\"\n\nimport os\nfrom os.path import join as pjoin\nimport collections\nimport json\nimport torch\nimport numpy as np\nimport scipy.misc as m\nimport scipy.io as io\nimport matplotlib.pyplot as plt\nimport glob\n\nfrom PIL import Image\n# see https://pypi.org/project/tqdm/\nfrom tqdm import tqdm\nfrom torch.utils import data\nfrom torchvision import transforms\n\n\n\"\"\" added by CCJ: \"\"\"\ndef pascal_voc_color_map(N=256, normalized=False):\n \"\"\" see source code func colorize() at https://gist.github.com/jimfleming/c1adfdb0f526465c99409cc143dea97b\"\"\"\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\n\n\ndef get_pascal_labels():\n \"\"\"Load the mapping that associates pascal classes with label colors\n Returns:\n np.ndarray with dimensions (21, 3)\n \"\"\"\n return np.asarray(\n [\n [0, 0, 0], # 0: background\n [128, 0, 0], #1: aeroplane\n [0, 128, 0], # 2: bicycle\n [128, 128, 0], # 3: bird\n [0, 0, 128], #4:'boat' \n [128, 0, 128], #5: 'bottle'\n [0, 128, 128], #6: 'bus'\n [128, 128, 128], #7: 'car'\n [64, 0, 0], #8: 'cat'\n [192, 0, 0], #9: 'chair'\n [64, 128, 0],#10: 'cow'\n [192, 128, 0],#11: 'diningtable'\n [64, 0, 128], #12: 'dog'\n [192, 0, 128], #13:#'horse'\n [64, 128, 128],#14: 'motorbike'\n [192, 128, 128],#15:'person'\n [0, 64, 0],#16:'pottedplant'\n [128, 64, 0],#17:sheep\n [0, 192, 0],#18:sofa\n [128, 192, 0],#19:train\n [0, 64, 128],#20:'tv/monitor'\n ]\n )\n\ndef pascal_voc_batch_label2RGB(batch_label_mask):\n \"\"\"Decode segmentation class labels into a color image\n\n Args:\n batch_label_mask (np.ndarray): an (N, H, W) or (N,H,W,1), or (N,1,H,W) \n array of integer values denoting the class \n label at each spatial location.\n\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n #print (\"[???] batch_label_mask shape = \", batch_label_mask.shape )\n if batch_label_mask.shape[-1] == 1:\n channel_dim = -1\n elif batch_label_mask.shape[1] == 1:\n channel_dim = 1\n\n indices = np.squeeze(batch_label_mask, axis=channel_dim).astype(np.int32) # in shape [N, H, W]\n N, H, W = indices.shape[:]\n #print (\"[???] indices.shape = \", indices.shape)\n \n batch_label_rgb = np.zeros((N,H,W,3))\n #get pascal voc colormap\n cmap = pascal_voc_color_map(N=256, normalized=False)# in shape [256, 3]\n #print ('[???] 
camp shape = ', cmap.shape)\n for j in range(N):\n # gather\n batch_label_rgb[j,:,:,0] = np.take(cmap[:,0], indices[j]) / 255 # Red\n batch_label_rgb[j,:,:,1] = np.take(cmap[:,1], indices[j]) / 255 # Green\n batch_label_rgb[j,:,:,2] = np.take(cmap[:,2], indices[j]) / 255 # Blue\n \n return batch_label_rgb\n \n\nclass pascalVOCLoader(data.Dataset):\n \"\"\"Data loader for the Pascal VOC semantic segmentation dataset.\n\n Annotations from both the original VOC data (which consist of RGB images\n in which colours map to specific classes) and the SBD (Berkely) dataset(\n > see: http://home.bharathh.info/pubs/codes/SBD/download.html) (where \n annotations are stored as .mat files) are converted into a common\n `label_mask` format. Under this format, each mask is an (M,N) array of\n integer values from 0 to 21, where 0 represents the background class.\n\n The label masks are stored in a new folder, called `pre_encoded`, which\n is added as a subdirectory of the `SegmentationClass` folder in the\n original Pascal VOC data layout.\n\n A total of five data splits are provided for working with the VOC data:\n train: The original VOC 2012 training data - 1464 images\n val: The original VOC 2012 validation data - 1449 images\n trainval: The combination of `train` and `val` - 2913 images\n train_aug: The unique images present in both the train split and\n training images from SBD: - 8829 images (the unique members\n of the result of combining lists of length 1464 and 8498)\n train_aug_val: The original VOC 2012 validation data minus the images\n present in `train_aug` (This is done with the same logic as\n the validation set used in FCN PAMI paper, but with VOC 2012\n rather than VOC 2011) - 904 images\n \"\"\"\n\n def __init__(\n self,\n root,\n sbd_path=None,\n split=\"train_aug\",\n is_transform=False,\n img_size=161, # 512\n augmentations=None,\n #img_norm=True,\n test_mode=False,\n ):\n self.root = root\n self.sbd_path = sbd_path\n self.split = split\n self.is_transform = is_transform\n self.augmentations = augmentations\n #self.img_norm = img_norm\n self.test_mode = test_mode\n self.n_classes = 21\n self.mean = np.array([104.00699, 116.66877, 122.67892])\n self.files = collections.defaultdict(list)\n self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)\n\n if not self.test_mode:\n for split in [\"train\", \"val\", \"trainval\"]:\n path = pjoin(self.root, \"ImageSets/Segmentation\", split + \".txt\")\n file_list = tuple(open(path, \"r\"))\n file_list = [id_.rstrip() for id_ in file_list]\n self.files[split] = file_list\n self.setup_annotations()\n\n self.tf = transforms.Compose(\n [\n #ToTensor(): Converts a PIL Image or numpy.ndarray (H x W x C) \n # in the range [0, 255] to a torch.FloatTensor of \n # shape (C x H x W) in the range [0.0, 1.0] if the \n # PIL Image belongs to one of the modes (L, LA, P, I, \n # F, RGB, YCbCr, RGBA, CMYK, 1) or if the numpy.ndarray \n # has dtype = np.uint8\n transforms.ToTensor(),\n # > see: https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683\n # All pretrained torchvision models have the same preprocessing, \n # which is to normalize using the following mean/std values:\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], #imagenet mean\n std=[0.229, 0.224, 0.225]), # imagenet std\n ]\n )\n\n def __len__(self):\n return len(self.files[self.split])\n\n def __getitem__(self, index):\n im_name = self.files[self.split][index]\n im_path = pjoin(self.root, \"JPEGImages\", im_name + \".jpg\")\n lbl_path = 
pjoin(self.root, \"SegmentationClass/pre_encoded\", im_name + \".png\")\n im = Image.open(im_path)\n lbl = Image.open(lbl_path)\n #print (\"[***] img size : \", im.size)\n #print (\"[***] lable size : \", lbl.size)\n if self.augmentations is not None:\n im, lbl = self.augmentations(im, lbl)\n if self.is_transform:\n im, lbl = self.transform(im, lbl)\n\n # change [H, W] to [C = 1, H, W]\n lbl = torch.unsqueeze(lbl, 0)\n #print (\"[***] expend lable shape : \", lbl.shape)\n return im, lbl\n\n def transform(self, img, lbl):\n if self.img_size == (\"same\", \"same\"):\n pass\n else:\n img = img.resize((self.img_size[0], self.img_size[1]))# uint8 with RGB mode\n lbl = lbl.resize((self.img_size[0], self.img_size[1]))\n img = self.tf(img) # in shape (C x H x W), in the range [0.0, 1.0];\n #lbl = torch.from_numpy(np.array(lbl)).long()\n lbl = np.array(lbl)\n #lbl[lbl == 255] = 0 # disabled it, updated on 2019/11/01;\n lbl = torch.from_numpy(lbl).float()\n return img, lbl\n \"\"\"\n pascol_labels = [\n 'background', #0\n 'aeroplane', #1\n 'bicycle', #2\n 'bird', #3\n 'boat', #4\n 'bottle', #5\n 'bus', #6\n 'car', #7\n 'cat', #8\n 'chair', #9\n 'cow', #10\n 'diningtable', #11\n 'dog', #12\n 'horse', #13\n 'motorbike', #14\n 'person', #15\n 'pottedplant', #16\n 'sheep', #17\n 'sofa', #18\n 'train', #19\n 'tv/monitor', #20\n \"void/unlabelled\", #255\n ] \n \"\"\"\n\n def get_pascal_labels(self):\n \"\"\"Load the mapping that associates pascal classes with label colors\n\n Returns:\n np.ndarray with dimensions (21, 3)\n \"\"\"\n return np.asarray(\n [\n [0, 0, 0], # 0: background\n [128, 0, 0], #1: aeroplane\n [0, 128, 0], # 2: bicycle\n [128, 128, 0], # 3: bird\n [0, 0, 128], #4:'boat' \n [128, 0, 128], #5: 'bottle'\n [0, 128, 128], #6: 'bus'\n [128, 128, 128], #7: 'car'\n [64, 0, 0], #8: 'cat'\n [192, 0, 0], #9: 'chair'\n [64, 128, 0],#10: 'cow'\n [192, 128, 0],#11: 'diningtable'\n [64, 0, 128], #12: 'dog'\n [192, 0, 128], #13:#'horse'\n [64, 128, 128],#14: 'motorbike'\n [192, 128, 128],#15:'person'\n [0, 64, 0],#16:'pottedplant'\n [128, 64, 0],#17:sheep\n [0, 192, 0],#18:sofa\n [128, 192, 0],#19:train\n [0, 64, 128],#20:'tv/monitor'\n ]\n )\n\n def encode_segmap(self, mask):\n \"\"\"Encode segmentation label images as pascal classes\n\n Args:\n mask (np.ndarray): raw segmentation label image of dimension\n (M, N, 3), in which the Pascal classes are encoded as colors.\n\n Returns:\n (np.ndarray): class map with dimensions (M,N), where the value at\n a given location is the integer denoting the class index.\n \"\"\"\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(self.get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask\n\n def decode_segmap(self, label_mask, plot=False):\n \"\"\"Decode segmentation class labels into a color image\n\n Args:\n label_mask (np.ndarray): an (M,N) array of integer values denoting\n the class label at each spatial location.\n plot (bool, optional): whether to show the resulting color image\n in a figure.\n\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n label_colours = self.get_pascal_labels()\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, self.n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], 
label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb\n\n def setup_annotations(self):\n \"\"\"Sets up Berkley annotations by adding image indices to the\n `train_aug` split and pre-encode all segmentation labels into the\n common label_mask format (if this has not already been done). This\n function also defines the `train_aug` and `train_aug_val` data splits\n according to the description in the class docstring\n \"\"\"\n sbd_path = self.sbd_path\n target_path = pjoin(self.root, \"SegmentationClass/pre_encoded\")\n if not os.path.exists(target_path):\n os.makedirs(target_path)\n #print ('sbd_path = ', sbd_path)\n path = pjoin(sbd_path, \"dataset/train.txt\")\n #print ('path = ', path)\n sbd_train_list = tuple(open(path, \"r\"))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files[\"train\"] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in sorted(np.unique(train_aug, return_index=True)[1])]\n self.files[\"train_aug\"] = train_aug\n set_diff = set(self.files[\"val\"]) - set(train_aug) # remove overlap\n self.files[\"train_aug_val\"] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, \"*.png\"))\n expected = np.unique(self.files[\"train_aug\"] + self.files[\"val\"]).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, \"dataset/cls\", ii + \".mat\")\n data = io.loadmat(lbl_path)\n lbl = data[\"GTcls\"][0][\"Segmentation\"][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + \".png\"), lbl)\n\n for ii in tqdm(self.files[\"trainval\"]):\n fname = ii + \".png\"\n lbl_path = pjoin(self.root, \"SegmentationClass\", fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, \"unexpected dataset sizes\"\n\n\n# Leave code for debugging purposes\nif __name__ == \"__main__\":\n #dummy main\n import src.augmentations as aug\n from six.moves import input\n local_path = '/media/ccjData2/datasets/pascal_voc_seg/VOCdevkit/VOC2012/'\n bs = 4\n prob = 0.5\n sbd_path = '/media/ccjData2/datasets/pascal_voc_seg/SBD_benchmark_RELEASE/'\n augs = aug.Compose([aug.RandomRotate(10), aug.RandomHorizontallyFlip(prob)])\n dst = pascalVOCLoader(root=local_path, sbd_path = sbd_path, is_transform=True, augmentations=augs)\n trainloader = data.DataLoader(dst, batch_size=bs)\n for i, data in enumerate(trainloader):\n imgs, labels = data\n imgs = imgs.numpy()[:, ::-1, :, :]\n imgs = np.transpose(imgs, [0,2,3,1])\n f, axarr = plt.subplots(bs, 2)\n for j in range(bs):\n axarr[j][0].imshow(imgs[j])\n axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))\n plt.show()\n a = input()\n if a == 'ex':\n break\n else:\n plt.close()\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.take",
"numpy.squeeze",
"matplotlib.pyplot.subplots",
"torch.unsqueeze",
"numpy.all",
"matplotlib.pyplot.close",
"numpy.transpose",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"torch.abs",
"torch.autograd.set_detect_anomaly",
"torch.load",
"torch.nn.functional.l1_loss",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.functional.interpolate",
"torch.save",
"torch.randn",
"torch.squeeze",
"torch.optim.Adam",
"numpy.ascontiguousarray",
"numpy.logical_and",
"numpy.sum",
"numpy.abs",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.autograd.profiler.profile"
],
[
"matplotlib.pyplot.imshow",
"numpy.take",
"numpy.unique",
"numpy.asarray",
"numpy.squeeze",
"scipy.io.loadmat",
"matplotlib.pyplot.subplots",
"torch.unsqueeze",
"torch.from_numpy",
"numpy.all",
"scipy.misc.imread",
"matplotlib.pyplot.close",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": []
}
] |
escofresco/picint | [
"ee94d72cf25bf87472f6f60ad4c80821143eec28"
] | [
"mnist.py"
] | [
"# Train, Evaluate and Save the DL Model\n\n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport numpy as np\n\ndef main():\n\n batch_size = 128\n num_classes = 10\n epochs = 1\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n print('input_shape')\n print(input_shape)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n model.save('mnist_cnn_model.h5')\n\n # Save the weights\n model.save_weights('model_weights.h5')\n # Save the model architecture\n with open('model_architecture.json', 'w') as f:\n f.write(model.to_json())\n\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n print('M:')\n print(y_test[0])\n print(x_test[0].shape)\n x = x_test[0].reshape(1, 28, 28, 1)\n out = model.predict(x)\n print(out[0])\n print(np.argmax(out[0]))\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ondrados/bachelor-thesis | [
"1ce6f40dfdeadbdcc31a1cce785962f9cf3145fd",
"1ce6f40dfdeadbdcc31a1cce785962f9cf3145fd"
] | [
"yolo_v3/predict2.py",
"faster_rcnn/eval.py"
] | [
"import os\nimport time\nimport torch\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import DataLoader, random_split\n\nfrom data_utils import MyTestDataset, get_test_transforms\nfrom models import Darknet\nfrom utils import non_max_suppression\n\nfrom conf.settings import BASE_DIR\n\n\nmodels_path = os.path.join(BASE_DIR, \"models\")\nimages_path = os.path.join(BASE_DIR, \"images\")\n\nif __name__ == \"__main__\":\n attempt = 4\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Running on {device}...\")\n\n model = Darknet(os.path.join(BASE_DIR, \"yolo_v3/config/yolov3-custom.cfg\")).to(device)\n model.load_state_dict(torch.load(os.path.join(models_path, \"yolo_v3_4_17.pt\"), map_location=device))\n\n model2 = Darknet(os.path.join(BASE_DIR, \"yolo_v3/config/yolov3-custom.cfg\")).to(device)\n model2.load_state_dict(torch.load(os.path.join(models_path, \"yolo_v3_4_20.pt\"), map_location=device))\n\n model3 = Darknet(os.path.join(BASE_DIR, \"yolo_v3/config/yolov3-custom.cfg\")).to(device)\n model3.load_state_dict(torch.load(os.path.join(models_path, \"yolo_v3_4_25.pt\"), map_location=device))\n\n dataset = MyTestDataset(split='stage1_train', transforms=get_test_transforms(rescale_size=(416, 416)))\n\n test_loader = DataLoader(dataset, batch_size=1, num_workers=0, shuffle=False)\n\n model.eval()\n model2.eval()\n model3.eval()\n for i, (image, targets) in enumerate(test_loader):\n image = image[0].to(device=device)\n name = targets[\"name\"][0]\n start_time = time.time()\n with torch.no_grad():\n outputs = model(image)\n outputs2 = model2(image)\n outputs3 = model3(image)\n outputs = non_max_suppression(outputs, conf_thres=0.5)\n outputs2 = non_max_suppression(outputs2, conf_thres=0.5)\n outputs3 = non_max_suppression(outputs3, conf_thres=0.5)\n\n elapsed_time = time.time() - start_time\n if outputs[0] is not None:\n boxes = outputs[0][:, 0:4]\n boxes2 = outputs2[0][:, 0:4]\n boxes3 = outputs3[0][:, 0:4]\n else:\n continue\n\n image_copy = Image.fromarray(image.cpu().numpy()[0, 0, :, :])\n if image_copy.mode != \"RGB\":\n image_copy = image_copy.convert(\"RGB\")\n draw = ImageDraw.Draw(image_copy)\n for box in boxes:\n x0, y0, x1, y1 = box\n draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))\n\n image_copy2 = Image.fromarray(image.cpu().numpy()[0, 0, :, :])\n if image_copy2.mode != \"RGB\":\n image_copy2 = image_copy2.convert(\"RGB\")\n draw = ImageDraw.Draw(image_copy2)\n for box in boxes2:\n x0, y0, x1, y1 = box\n draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))\n\n image_copy3 = Image.fromarray(image.cpu().numpy()[0, 0, :, :])\n if image_copy3.mode != \"RGB\":\n image_copy3 = image_copy3.convert(\"RGB\")\n draw = ImageDraw.Draw(image_copy3)\n for box in boxes3:\n x0, y0, x1, y1 = box\n draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))\n # image_copy.show()\n # image_copy.save(os.path.join(images_path, f\"yolo_v3/{attempt}/images/{name}.png\"))\n print(f\"{name}, time: {elapsed_time}\")\n\n fig = plt.figure(dpi=400)\n ax1 = fig.add_subplot(1, 3, 1)\n ax1.imshow(image_copy)\n ax2 = fig.add_subplot(1, 3, 2)\n ax2.imshow(image_copy2)\n ax3 = fig.add_subplot(1, 3, 3)\n ax3.imshow(image_copy3)\n plt.show()\n\n",
"import os\nimport math\nimport torch\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom torch.utils.data import random_split, DataLoader\nfrom matplotlib import pyplot as plt\nfrom faster_rcnn.models import model\n\nfrom data_utils import MyDataset, get_transforms, my_collate\n\nfrom conf.settings import BASE_DIR\n\nmodels_path = os.path.join(BASE_DIR, \"models\")\nimages_path = os.path.join(BASE_DIR, \"images\")\n\n\ndef evaluate(model, eval_loader, dist_threshold=3):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n runnning_dice_vec = 0\n runnning_prec_vec = 0\n runnning_rec_vec = 0\n dice_vec = []\n model.eval()\n for i, (image, targets) in enumerate(eval_loader):\n\n name = targets[0][\"name\"]\n image = image[0].to(device=device)\n targets = [{\n \"boxes\": targets[0][\"boxes\"].to(device=device),\n \"labels\": targets[0][\"labels\"].to(device=device),\n \"name\": name\n }]\n\n with torch.no_grad():\n predictions = model(image)\n\n image_copy = Image.fromarray(image.cpu().numpy()[0, 0, :, :])\n if image_copy.mode != \"RGB\":\n image_copy = image_copy.convert(\"RGB\")\n # draw = ImageDraw.Draw(image_copy)\n # for box in targets[0][\"boxes\"]:\n # x0, y0, x1, y1 = box\n # # draw.rectangle([(x0, y0), (x1, y1)], outline=(0, 255, 0))\n # for box, score in zip(predictions[0][\"boxes\"], predictions[0][\"scores\"]):\n # if score > 0.5:\n # x0, y0, x1, y1 = box\n # # draw.rectangle([(x0, y0), (x1, y1)], outline=(255, 0, 255))\n # # image_copy.show()\n # # image_copy.save(os.path.join(images_path, f\"faster_rcnn/{attempt}/images/{name}.png\"))\n # plt.imshow(image_copy)\n # plt.show()\n\n gt_x = []\n gt_y = []\n for box in targets[0][\"boxes\"]:\n x0, y0, x1, y1 = box\n x = ((x0 + x1) / 2).tolist()\n y = ((y0 + y1) / 2).tolist()\n gt_x.append(x)\n gt_y.append(y)\n\n pred_x = []\n pred_y = []\n for box, score in zip(predictions[0][\"boxes\"], predictions[0][\"scores\"]):\n if score > 0.5:\n x0, y0, x1, y1 = box\n x = ((x0 + x1) / 2).tolist()\n y = ((y0 + y1) / 2).tolist()\n pred_x.append(x)\n pred_y.append(y)\n\n # fig = plt.figure(dpi=300)\n # ax1 = fig.add_subplot(1, 1, 1)\n # ax1.imshow(image_copy)\n # ax1.plot(gt_x, gt_y, 'g+', linewidth=3, markersize=12)\n # ax1.plot(pred_x, pred_y, 'm+', linewidth=3, markersize=12)\n # plt.show()\n\n dist_matrix = np.zeros((len(gt_x), len(pred_x)))\n for row, (g_x, g_y) in enumerate(zip(gt_x, gt_y)):\n for col, (p_x, p_y) in enumerate(zip(pred_x, pred_y)):\n x = abs(g_x - p_x)\n y = abs(g_y - p_y)\n dist_matrix[row, col] = math.sqrt((x*x)+(y*y))\n\n min_dists = np.amin(dist_matrix, axis=0)\n\n tp = 0\n fp = 0\n for dist in min_dists:\n if dist <= dist_threshold:\n tp += 1\n else:\n fp += 1\n tp = len(gt_x) if tp > len(gt_x) else tp\n fn = len(gt_x) - tp\n if (tp + fp) == 0:\n precision = 0\n else:\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n dice = (2 * tp) / (2 * tp + fp + fn)\n runnning_dice_vec += dice\n runnning_prec_vec += precision\n runnning_rec_vec += recall\n\n dice_vec.append(dice)\n\n print(f\"{name}, TP: {tp}, FP: {fp}, FN: {fn}, precision: {precision}, recall: {recall}, dice: {dice}\")\n # print(f\"Iteration: {i} of {len(eval_loader)}, image: {name}\")\n\n # dice_vec.append(runnning_dice_vec / len(eval_loader))\n # prec_vec.append(runnning_prec_vec / len(eval_loader))\n # rec_vec.append(runnning_rec_vec / len(eval_loader))\n\n prec_result = runnning_prec_vec / len(eval_loader)\n rec_result = runnning_rec_vec / len(eval_loader)\n dice_result = runnning_dice_vec / len(eval_loader)\n\n return 
prec_result, rec_result, dice_result, dice_vec\n\n\nif __name__ == \"__main__\":\n from models import model\n\n attempt = 7\n model_name = \"faster_rcnn_7_30.pt\"\n\n os.makedirs(models_path, exist_ok=True)\n os.makedirs(os.path.join(images_path, f\"faster_rcnn/{attempt}/images\"), exist_ok=True)\n os.makedirs(os.path.join(images_path, f\"faster_rcnn/{attempt}/plots\"), exist_ok=True)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(f\"Running on {device}, using {model_name}\")\n print(f\"This is {attempt}. attempt\")\n\n model.load_state_dict(torch.load(os.path.join(models_path, model_name), map_location=device))\n model.to(device=device)\n\n split = \"stage1_train\"\n dataset = MyDataset(split=split, transforms=get_transforms(train=True, rescale_size=(256, 256)))\n trainset, evalset = random_split(dataset, [600, 70])\n\n train_loader = DataLoader(trainset, batch_size=1, num_workers=0, shuffle=True, collate_fn=my_collate)\n eval_loader = DataLoader(evalset, batch_size=1, num_workers=0, shuffle=False, collate_fn=my_collate)\n precision, recall, dice, dice_vec = evaluate(model, eval_loader, dist_threshold=3)\n\n print(f\"Done, precision: {precision}, recall: {recall}, dice: {dice}\")\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.amin",
"torch.utils.data.DataLoader",
"torch.utils.data.random_split",
"torch.no_grad",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Chillee/benchmark | [
"91e1b2871327e44b9b7d24d173ca93720fb6565b",
"91e1b2871327e44b9b7d24d173ca93720fb6565b",
"91e1b2871327e44b9b7d24d173ca93720fb6565b",
"a8a458230489710ab945b37ec22e93315230f2de",
"a8a458230489710ab945b37ec22e93315230f2de",
"a8a458230489710ab945b37ec22e93315230f2de"
] | [
"torchbenchmark/models/demucs/demucs/separate.py",
"legacy/rnns/benchmarks/torchqrnn/forget_mult.py",
"legacy/rnns/benchmarks/cudnn_lstm.py",
"torchbenchmark/models/fastNLP/fastNLP/core/field.py",
"torchbenchmark/models/fastNLP/test/modules/encoder/test_seq2seq_encoder.py",
"torchbenchmark/models/fastNLP/reproduction/coreference_resolution/model/softmax_loss.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport hashlib\nimport sys\nfrom pathlib import Path\n\nimport requests\nimport torch as th\nimport tqdm\nfrom scipy.io import wavfile\n\nfrom .audio import AudioFile\nfrom .utils import apply_model, load_model\n\nBASE_URL = \"https://dl.fbaipublicfiles.com/demucs/v2.0/\"\nPRETRAINED_MODELS = {\n 'demucs.th': 'f6c4148ba0dc92242d82d7b3f2af55c77bd7cb4ff1a0a3028a523986f36a3cfd',\n 'demucs.th.gz': 'e70767bfc9ce62c26c200477ea29a20290c708b210977e3ef2c75ace68ea4be1',\n 'demucs_extra.th': '3331bcc5d09ba1d791c3cf851970242b0bb229ce81dbada557b6d39e8c6a6a87',\n 'demucs_extra.th.gz': 'f9edcf7fe55ea5ac9161c813511991e4ba03188112fd26a9135bc9308902a094',\n 'light.th': '79d1ee3c1541c729c552327756954340a1a46a11ce0009dea77dc583e4b6269c',\n 'light.th.gz': '94c091021d8cdee0806b6df0afbeb59e73e989dbc2c16d2c1c370b2edce774fd',\n 'light_extra.th': '9e9b4af564229c80cc73c95d02d2058235bb054c6874b3cba4d5b26943a5ddcb',\n 'light_extra.th.gz': '48bb1a85f5ad0ca400512fcd0dcf91ec94e886a1602a552ee32133f5e09aeae0',\n 'tasnet.th': 'be56693f6a5c4854b124f95bb9dd043f3167614898493738ab52e25648bec8a2',\n 'tasnet_extra.th': '0ccbece3acd98785a367211c9c35b1eadae8d148b0d37fe5a5494d6d335269b5',\n}\n\n\ndef download_file(url, target):\n \"\"\"\n Download a file with a progress bar.\n\n Arguments:\n url (str): url to download\n target (Path): target path to write to\n sha256 (str or None): expected sha256 hexdigest of the file\n \"\"\"\n def _download():\n response = requests.get(url, stream=True)\n total_length = int(response.headers.get('content-length', 0))\n\n with tqdm.tqdm(total=total_length, ncols=120, unit=\"B\", unit_scale=True) as bar:\n with open(target, \"wb\") as output:\n for data in response.iter_content(chunk_size=4096):\n output.write(data)\n bar.update(len(data))\n\n try:\n _download()\n except: # noqa, re-raising\n if target.exists():\n target.unlink()\n raise\n\n\ndef verify_file(target, sha256):\n hasher = hashlib.sha256()\n with open(target, \"rb\") as f:\n while True:\n data = f.read(65536)\n if not data:\n break\n hasher.update(data)\n signature = hasher.hexdigest()\n if signature != sha256:\n print(\n f\"Invalid sha256 signature for the file {target}. Expected {sha256} but got \"\n f\"{signature}.\\nIf you have recently updated the repo, it is possible \"\n \"the checkpoints have been updated. It is also possible that a previous \"\n f\"download did not run to completion.\\nPlease delete the file '{target.absolute()}' \"\n \"and try again.\",\n file=sys.stderr)\n sys.exit(1)\n\n\ndef encode_mp3(wav, path, verbose=False):\n try:\n import lameenc\n except ImportError:\n print(\"Failed to call lame encoder. Maybe it is not installed? 
\"\n \"On windows, run `python.exe -m pip install -U lameenc`, \"\n \"on OSX/Linux, run `python3 -m pip install -U lameenc`, \"\n \"then try again.\", file=sys.stderr)\n sys.exit(1)\n encoder = lameenc.Encoder()\n encoder.set_bit_rate(320)\n encoder.set_in_sample_rate(44100)\n encoder.set_channels(2)\n encoder.set_quality(2) # 2-highest, 7-fastest\n if not verbose:\n encoder.silence()\n mp3_data = encoder.encode(wav.tostring())\n mp3_data += encoder.flush()\n with open(path, \"wb\") as f:\n f.write(mp3_data)\n\n\ndef main():\n parser = argparse.ArgumentParser(\"demucs.separate\",\n description=\"Separate the sources for the given tracks\")\n parser.add_argument(\"tracks\", nargs='+', type=Path, default=[], help='Path to tracks')\n parser.add_argument(\"-n\",\n \"--name\",\n default=\"demucs\",\n help=\"Model name. See README.md for the list of pretrained models. \"\n \"Default is demucs.\")\n parser.add_argument(\"-Q\", \"--quantized\", action=\"store_true\", dest=\"quantized\", default=False,\n help=\"Load the quantized model rather than the quantized version. \"\n \"Quantized model is about 4 times smaller but might worsen \"\n \"slightly quality.\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n parser.add_argument(\"-o\",\n \"--out\",\n type=Path,\n default=Path(\"separated\"),\n help=\"Folder where to put extracted tracks. A subfolder \"\n \"with the model name will be created.\")\n parser.add_argument(\"--models\",\n type=Path,\n default=Path(\"models\"),\n help=\"Path to trained models. \"\n \"Also used to store downloaded pretrained models\")\n parser.add_argument(\"--dl\",\n action=\"store_true\",\n help=\"Automatically download model if missing.\")\n parser.add_argument(\"-d\",\n \"--device\",\n default=\"cuda\" if th.cuda.is_available() else \"cpu\",\n help=\"Device to use, default is cuda if available else cpu\")\n parser.add_argument(\"--shifts\",\n default=0,\n type=int,\n help=\"Number of random shifts for equivariant stabilization.\"\n \"Increase separation time but improves quality for Demucs. 10 was used \"\n \"in the original paper.\")\n parser.add_argument(\"--nosplit\",\n action=\"store_false\",\n default=True,\n dest=\"split\",\n help=\"Apply the model to the entire input at once rather than \"\n \"first splitting it in chunks of 10 seconds. Will OOM with Tasnet \"\n \"but will work fine for Demucs if you have at least 16GB of RAM.\")\n parser.add_argument(\"--float32\",\n action=\"store_true\",\n help=\"Convert the output wavefile to use pcm f32 format instead of s16. 
\"\n \"This should not make a difference if you just plan on listening to the \"\n \"audio but might be needed to compute exactly metrics like SDR etc.\")\n parser.add_argument(\"--int16\",\n action=\"store_false\",\n dest=\"float32\",\n help=\"Opposite of --float32, here for compatibility.\")\n parser.add_argument(\"--mp3\", action=\"store_true\",\n help=\"Convert the output wavs to mp3 with 320 kb/s rate.\")\n\n args = parser.parse_args()\n name = args.name + \".th\"\n if args.quantized:\n name += \".gz\"\n\n model_path = args.models / name\n sha256 = PRETRAINED_MODELS.get(name)\n if not model_path.is_file():\n if sha256 is None:\n print(f\"No pretrained model {args.name}\", file=sys.stderr)\n sys.exit(1)\n if not args.dl:\n print(\n f\"Could not find model {model_path}, however a matching pretrained model exist, \"\n \"to download it, use --dl\",\n file=sys.stderr)\n sys.exit(1)\n args.models.mkdir(exist_ok=True, parents=True)\n url = BASE_URL + name\n print(\"Downloading pre-trained model weights, this could take a while...\")\n download_file(url, model_path)\n if sha256 is not None:\n verify_file(model_path, sha256)\n model = load_model(model_path).to(args.device)\n if args.quantized:\n args.name += \"_quantized\"\n out = args.out / args.name\n out.mkdir(parents=True, exist_ok=True)\n source_names = [\"drums\", \"bass\", \"other\", \"vocals\"]\n print(f\"Separated tracks will be stored in {out.resolve()}\")\n for track in args.tracks:\n if not track.exists():\n print(\n f\"File {track} does not exist. If the path contains spaces, \"\n \"please try again after surrounding the entire path with quotes \\\"\\\".\",\n file=sys.stderr)\n continue\n print(f\"Separating track {track}\")\n wav = AudioFile(track).read(streams=0, samplerate=44100, channels=2).to(args.device)\n # Round to nearest short integer for compatibility with how MusDB load audio with stempeg.\n wav = (wav * 2**15).round() / 2**15\n ref = wav.mean(0)\n wav = (wav - ref.mean()) / ref.std()\n sources = apply_model(model, wav, shifts=args.shifts, split=args.split, progress=True)\n sources = sources * ref.std() + ref.mean()\n\n track_folder = out / track.name.split(\".\")[0]\n track_folder.mkdir(exist_ok=True)\n for source, name in zip(sources, source_names):\n if args.mp3 or not args.float32:\n source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short()\n source = source.cpu().transpose(0, 1).numpy()\n stem = str(track_folder / name)\n if args.mp3:\n encode_mp3(source, stem + \".mp3\", verbose=args.verbose)\n else:\n wavname = str(track_folder / f\"{name}.wav\")\n wavfile.write(wavname, 44100, source)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import math\nimport torch\nfrom torch.autograd import Variable\nfrom cupy.cuda import function\nfrom pynvrtc.compiler import Program\nfrom collections import namedtuple\n\n###\n# ForgetMult implementation copied from\n# https://github.com/salesforce/pytorch-qrnn/blob/master/torchqrnn/forget_mult.py#L10\n#\n\nkernel = '''\nextern \"C\"\n__global__ void recurrent_forget_mult(float *dst, const float *f, const float *x, int SEQ, int BATCH, int HIDDEN)\n{\n /*\n Note: destination is assumed to be one timestep longer than f or x where dst[0] = h_{-1}\n This means dst array has a separate index than that of f or x\n */\n int hid = blockIdx.x * blockDim.x + threadIdx.x;\n int bid = blockIdx.y * blockDim.y + threadIdx.y;\n if(hid >= HIDDEN || bid >= BATCH)\n return;\n //\n for (int ts = 0 + 1; ts < SEQ + 1; ts++) {\n // Good sanity check for debugging - only perform additions to a zeroed chunk of memory\n // Addition seems atomic or near atomic - you should get incorrect answers if doubling up via threads\n // Note: the index i needs to be offset by one as f[0] (f_t) is used for dst[1] (h_t) etc\n // To move timesteps, we step HIDDEN * BATCH\n // To move batches, we move HIDDEN\n // To move neurons, we move +- 1\n // Note: dst[dst_i] = ts * 100 + bid * 10 + hid; is useful for debugging\n int i = (ts - 1) * HIDDEN * BATCH + bid * HIDDEN + hid;\n int dst_i = (ts - 0) * HIDDEN * BATCH + bid * HIDDEN + hid;\n int dst_iminus1 = (ts - 1) * HIDDEN * BATCH + bid * HIDDEN + hid;\n dst[dst_i] = f[i] * x[i];\n dst[dst_i] += (1 - f[i]) * dst[dst_iminus1];\n }\n}\nextern \"C\"\n__global__ void bwd_recurrent_forget_mult(const float *h, const float *f, const float *x, const float *gh, float *gf,\n float *gx, float *ghinit, int SEQ, int BATCH, int HIDDEN)\n{\n /*\n Note: h is assumed to be one timestep longer than f, x, gf, gx, or gh where dst[0] = h_{-1}\n This means dst array has a separate index than that of f or x\n */\n int hid = blockIdx.x * blockDim.x + threadIdx.x;\n int bid = blockIdx.y * blockDim.y + threadIdx.y;\n if(hid >= HIDDEN || bid >= BATCH)\n return;\n //\n double running_f = 0;\n for (int ts = SEQ - 1 + 1; ts >= 0 + 1; ts--) {\n int i = (ts - 1) * HIDDEN * BATCH + bid * HIDDEN + hid;\n int dst_i = (ts - 0) * HIDDEN * BATCH + bid * HIDDEN + hid;\n int dst_iminus1 = (ts - 1) * HIDDEN * BATCH + bid * HIDDEN + hid;\n //\n running_f += gh[dst_iminus1];\n // Gradient of X\n gx[i] = f[i] * running_f;\n // Gradient of F\n gf[i] = (x[i] - h[dst_iminus1]) * running_f;\n //\n // The line below is likely more numerically stable than (1 - f[i]) * running_f;\n running_f = running_f - f[i] * running_f;\n }\n ghinit[bid * HIDDEN + hid] = running_f;\n}\n'''\n\n\ndef forget_mult(f, x, hidden_init=None):\n # autograd forget_mult implementation\n result = []\n forgets = f.split(1, dim=0)\n prev_h = hidden_init\n for i, h in enumerate((f * x).split(1, dim=0)):\n if prev_h is not None:\n h = h + (1 - forgets[i]) * prev_h\n # h is (1, batch, hidden) when it needs to be (batch_hidden)\n # Calling squeeze will result in badness if batch size is 1\n h = h.view(h.size()[1:])\n result.append(h)\n prev_h = h\n return torch.stack(result)\n\n\nclass GPUForgetMult(torch.autograd.Function):\n # special kernel for forget_mult\n configured_gpus = {}\n ptx = None\n\n def __init__(self):\n super(GPUForgetMult, self).__init__()\n\n def compile(self):\n if self.ptx is None:\n program = Program(kernel.encode(),\n 'recurrent_forget_mult.cu'.encode())\n GPUForgetMult.ptx = program.compile()\n\n if torch.cuda.current_device() not in 
GPUForgetMult.configured_gpus:\n m = function.Module()\n m.load(bytes(self.ptx.encode()))\n\n self.forget_mult = m.get_function('recurrent_forget_mult')\n self.bwd_forget_mult = m.get_function('bwd_recurrent_forget_mult')\n\n Stream = namedtuple('Stream', ['ptr'])\n self.stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)\n\n GPUForgetMult.configured_gpus[torch.cuda.current_device()] = (\n self.forget_mult, self.bwd_forget_mult, self.stream)\n\n self.forget_mult, self.bwd_forget_mult, self.stream = (\n GPUForgetMult.configured_gpus[torch.cuda.current_device()])\n\n def forward(self, f, x, hidden_init=None):\n self.compile()\n seq_size, batch_size, hidden_size = f.size()\n result = f.new(seq_size + 1, batch_size, hidden_size)\n # We only zero the result array (result[0]) if we don't set a hidden\n # initial state\n # All other values (result[1:]) are overwritten by default\n if hidden_init is not None:\n result[0, :, :] = hidden_init\n else:\n result = result.zero_()\n ###\n grid_hidden_size = min(hidden_size, 512)\n grid = (math.ceil(hidden_size / grid_hidden_size), batch_size)\n self.forget_mult(grid=grid, block=(grid_hidden_size, 1), args=[\n result.data_ptr(),\n f.data_ptr(),\n x.data_ptr(),\n seq_size,\n batch_size,\n hidden_size], stream=self.stream)\n self.save_for_backward(f, x, hidden_init)\n self.result = result\n return result[1:, :, :]\n\n def backward(self, grad_h):\n self.compile()\n f, x, hidden_init = self.saved_tensors\n h = self.result\n ###\n seq_size, batch_size, hidden_size = f.size()\n # Zeroing is not necessary as these will be overwritten\n grad_f = f.new(*f.size())\n grad_x = f.new(*f.size())\n grad_h_init = f.new(batch_size, hidden_size)\n ###\n grid_hidden_size = min(hidden_size, 512)\n grid = (math.ceil(hidden_size / grid_hidden_size), batch_size)\n self.bwd_forget_mult(grid=grid, block=(grid_hidden_size, 1), args=[\n h.data_ptr(), f.data_ptr(), x.data_ptr(), grad_h.data_ptr(),\n grad_f.data_ptr(), grad_x.data_ptr(), grad_h_init.data_ptr(),\n seq_size, batch_size, hidden_size], stream=self.stream)\n ###\n if hidden_init is not None:\n return grad_f, grad_x, grad_h_init\n return grad_f, grad_x\n\n\nclass ForgetMult(torch.nn.Module):\n r\"\"\"ForgetMult computes a simple recurrent equation:\n h_t = f_t * x_t + (1 - f_t) * h_{t-1}\n This equation is equivalent to dynamic weighted averaging.\n Inputs: X, hidden\n - X (seq_len, batch, input_size): tensor containing the features of the input sequence.\n - F (seq_len, batch, input_size): tensor containing the forget gate values, assumed in range [0, 1].\n - hidden_init (batch, input_size): tensor containing the initial hidden state for the recurrence (h_{t-1}).\n - use_kernel: If True, use the fast element-wise CUDA kernel for recurrence.\n If False, uses naive for loop. 
Default: True.\n \"\"\"\n\n def __init__(self, use_kernel=False, jit=False):\n super(ForgetMult, self).__init__()\n self.use_kernel = use_kernel\n if use_kernel:\n assert torch.cuda.is_available()\n\n if use_kernel:\n print(\"Using ForgetMult kernel\")\n self.forget_mult = GPUForgetMult()\n else:\n print(\"Not using ForgetMult kernel\")\n self.forget_mult = forget_mult\n\n if jit:\n self.traced = False\n self.jit = jit\n\n def forward(self, f, x, hidden_init=None):\n if self.use_kernel:\n assert f.is_cuda and x.is_cuda\n # Because this is a function, we remake it each time\n self.forget_mult = GPUForgetMult()\n\n if self.jit and not self.traced:\n print(\"Tracing...\")\n if hidden_init is None:\n self.forget_mult = torch.jit.trace(f, x)(self.forget_mult)\n else:\n self.forget_mult = torch.jit.trace(f, x)(hidden_init)\n self.traced = True\n\n # Avoiding 'RuntimeError: expected a Variable argument, but got\n # NoneType' when hidden_init is None\n if hidden_init is None:\n return self.forget_mult(f, x)\n return self.forget_mult(f, x, hidden_init)\n\n\ndef test_accuracy():\n seq, batch, hidden = 35, 20, 650\n # Larger input (batch * seq * hidden) results in excessive memory for gradient check\n seq, batch, hidden = 3, 7, 19\n a = Variable(torch.rand(seq, batch, hidden).cuda(), requires_grad=True)\n forget = Variable(torch.rand(seq, batch, hidden).cuda(), requires_grad=True)\n last_h = Variable(torch.rand(batch, hidden).cuda(), requires_grad=True)\n\n # seq, batch, hidden = 4, 1, 1\n # a = Variable(torch.Tensor([0.75, 0.5, 0.9, 0.8]).view(seq, batch, hidden).cuda(), requires_grad=True)\n # forget = Variable(torch.Tensor([0.25, 0.25, 0.5, 0.4]).view(seq, batch, hidden).cuda(), requires_grad=True)\n # last_h = Variable(torch.Tensor([0]).view(batch, hidden).cuda(), requires_grad=True)\n # print(forget, a, last_h)\n\n print('CUDA forget mult')\n print('=-=-' * 5)\n\n resulta = ForgetMult(use_kernel=True)(forget, a, last_h)\n print(resulta.size())\n loss = resulta.pow(2).sum()\n loss.backward()\n\n print('Result =', loss.item())\n print('X grad =', a.grad.mean().item())\n print('Forget grad =', forget.grad.mean().item())\n print('Last H grad =', last_h.grad.mean().item())\n\n x_grad_copy = a.grad.clone()\n\n print()\n print('CPU forget mult')\n print('=-=-' * 5)\n\n a.grad.data *= 0\n forget.grad.data *= 0\n last_h.grad.data *= 0\n\n resultb = ForgetMult(use_kernel=False)(forget, a, last_h)\n print(resultb.size())\n loss = resultb.pow(2).sum()\n loss.backward()\n\n print('Result =', loss.item())\n print('X grad =', a.grad.mean().item())\n print('Forget grad =', forget.grad.mean().item())\n print('Last H grad =', last_h.grad.mean().item())\n\n ###\n\n print()\n print('=-=-' * 5)\n print('(Xgrad - Xgrad).sum() =', (x_grad_copy - a.grad).sum().item())\n print('Residual error for result')\n print('=-=-' * 5)\n residual = (resulta - resultb)\n print(residual.abs().sum().item())\n\n # Had to loosen gradient checking, potentially due to general floating\n # point badness?\n from torch.autograd import gradcheck\n inputs = [forget, a, last_h]\n test = gradcheck(ForgetMult(), inputs, eps=1e-4, atol=1e-2)\n print(test)\n\n\nif __name__ == '__main__':\n test_accuracy()\n",
"import torch\nfrom torch.autograd import Variable\nimport torch.jit\nimport torch.nn\n\nimport argparse\nimport pprint\nimport gc\nimport time\nimport sys\n\nif __name__ == '__main__':\n from benchmark_common import benchmark_init\n from common import Bench, tag\nelse:\n from .benchmark_common import benchmark_init\n from .common import Bench, tag\n\n\ndef run_cudnn_lstm(cpu=0, gpu=0, batch_size=1, input_size=256, hidden_size=512,\n layers=1, seq_len=512, warmup=10, benchmark=30, backward=False,\n skip_cpu_governor_check=False):\n\n benchmark_init(cpu, gpu, skip_cpu_governor_check)\n\n def V(x):\n return Variable(x) # mandatory\n\n input = V(torch.randn(seq_len, batch_size, input_size).cuda(gpu))\n hx = V(torch.randn(layers, batch_size, hidden_size).cuda(gpu))\n cx = V(torch.randn(layers, batch_size, hidden_size).cuda(gpu))\n\n lstm = torch.nn.LSTM(input_size, hidden_size, layers).cuda(gpu)\n lstm.flatten_parameters()\n\n iter_timer = Bench(name='lstm_cudnn', cuda=True, warmup_iters=warmup)\n\n for i in range(warmup + benchmark):\n gc.collect()\n with iter_timer:\n hx_t, cx_t = lstm(input, (hx, cx))\n if backward:\n hx_t.sum().backward()\n\n return iter_timer\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"PyTorch CuDNN LSTM benchmark.\")\n parser.add_argument('--cpu', type=int, default=0, help=\"CPU to run on\")\n parser.add_argument('--gpu', type=int, default=0, help=\"GPU to run on\")\n parser.add_argument('--batch-size', type=int, default=1, help=\"Batch size\")\n parser.add_argument('--input-size', type=int, default=256, help=\"Input size\")\n parser.add_argument('--hidden-size', type=int, default=512, help=\"Hidden size\")\n parser.add_argument('--layers', type=int, default=1, help=\"Layers\")\n parser.add_argument('--seq-len', type=int, default=512, help=\"Sequence length\")\n parser.add_argument('--warmup', type=int, default=10, help=\"Warmup iterations\")\n parser.add_argument('--benchmark', type=int, default=30, help=\"Benchmark iterations\")\n parser.add_argument('--skip-cpu-governor-check', action='store_true',\n help=\"Skip checking whether CPU governor is set to `performance`\")\n parser.add_argument('--backward', action='store_true', help=\"time backward\")\n args = parser.parse_args()\n pprint.pprint(vars(args))\n\n run_cudnn_lstm(**vars(args))\n",
"r\"\"\"\n.. todo::\n doc\n\"\"\"\n\n__all__ = [\n \"Padder\",\n \"AutoPadder\",\n \"EngChar2DPadder\",\n]\n\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom copy import deepcopy\nfrom numbers import Number\nfrom typing import Any\n\nimport numpy as np\nimport torch\n\nfrom ._logger import logger\nfrom .utils import _is_iterable\n\n\nclass SetInputOrTargetException(Exception):\n def __init__(self, msg, index=None, field_name=None):\n super().__init__(msg)\n self.msg = msg\n self.index = index # 标示在哪个数据遭遇到问题了\n self.field_name = field_name # 标示当前field的名称\n\n\nclass AppendToTargetOrInputException(Exception):\n def __init__(self, msg, index=None, field_name=None):\n super().__init__(msg)\n self.msg = msg\n self.index = index # 标示在哪个数据遭遇到问题了\n self.field_name = field_name # 标示当前field的名称\n\n\nclass FieldArray:\n def __init__(self, name, content, is_target=False, is_input=False, padder=None, ignore_type=False,\n use_1st_ins_infer_dim_type=True):\n if len(content) == 0:\n raise RuntimeError(\"Empty fieldarray is not allowed.\")\n _content = content\n try:\n _content = list(_content)\n except BaseException as e:\n logger.error(f\"Cannot convert content(of type:{type(content)}) into list.\")\n raise e\n self.name = name\n self.content = _content\n self._ignore_type = ignore_type\n # 根据input的情况设置input,target等\n self._cell_ndim = None # 多少维度, 如果value是1, dim为0; 如果value是[1, 2], dim=2\n self.dtype = None # 最内层的element都是什么类型的\n self._use_1st_ins_infer_dim_type = bool(use_1st_ins_infer_dim_type)\n self._is_input = False\n self._is_target = False\n \n if is_input:\n self.is_input = is_input\n if is_target:\n self.is_target = is_target\n \n if padder is None:\n padder = AutoPadder(pad_val=0)\n else:\n assert isinstance(padder, Padder), \"padder must be of type fastNLP.Padder.\"\n padder = deepcopy(padder)\n self.set_padder(padder)\n \n @property\n def ignore_type(self):\n return self._ignore_type\n \n @ignore_type.setter\n def ignore_type(self, value):\n if value:\n self._cell_ndim = None\n self.dtype = None\n self._ignore_type = value\n \n @property\n def is_input(self):\n return self._is_input\n \n @is_input.setter\n def is_input(self, value):\n r\"\"\"\n 当 field_array.is_input = True / False 时被调用\n \"\"\"\n # 如果(value为True)且(_is_input和_is_target都是False)且(ignore_type为False)\n if value is True and \\\n self._is_target is False and \\\n self._ignore_type is False:\n self._check_dtype_and_ndim(only_check_1st_ins_dim_type=self._use_1st_ins_infer_dim_type)\n if value is False and self._is_target is False:\n self.dtype = None\n self._cell_ndim = None\n self._is_input = value\n \n @property\n def is_target(self):\n return self._is_target\n \n @is_target.setter\n def is_target(self, value):\n r\"\"\"\n 当 field_array.is_target = True / False 时被调用\n \"\"\"\n if value is True and \\\n self._is_input is False and \\\n self._ignore_type is False:\n self._check_dtype_and_ndim(only_check_1st_ins_dim_type=self._use_1st_ins_infer_dim_type)\n if value is False and self._is_input is False:\n self.dtype = None\n self._cell_ndim = None\n self._is_target = value\n \n def _check_dtype_and_ndim(self, only_check_1st_ins_dim_type=True):\n r\"\"\"\n 检查当前content所有的element是否是同一个类型,且是否每个元素具有相同的维度。通过的话,设置_cell_ndim与_ele_type属性;没有\n 通过将直接报错.\n\n :param bool only_check_1st_ins_dim_type: 是否只检查第一个元素的type和dim\n :return:\n \"\"\"\n cell_0 = self.content[0]\n index = 0\n try:\n type_0, dim_0 = _get_ele_type_and_dim(cell_0)\n if not only_check_1st_ins_dim_type:\n for cell in self.content[1:]:\n index += 1\n type_i, dim_i 
= _get_ele_type_and_dim(cell)\n if type_i != type_0:\n raise SetInputOrTargetException(\n \"Type:{} in index {} is different from the first element with type:{}.\"\n \".\".format(type_i, index, type_0))\n if dim_0 != dim_i:\n raise SetInputOrTargetException(\n \"Dimension:{} in index {} is different from the first element with \"\n \"dimension:{}.\".format(dim_i, index, dim_0))\n self._cell_ndim = dim_0\n self.dtype = type_0\n except SetInputOrTargetException as e:\n e.index = index\n raise e\n \n def append(self, val: Any):\n r\"\"\"\n :param val: 把该val append到fieldarray。\n :return:\n \"\"\"\n if (self._is_target or self._is_input) and self._ignore_type is False and not self._use_1st_ins_infer_dim_type:\n type_, dim_ = _get_ele_type_and_dim(val)\n if self.dtype != type_:\n raise AppendToTargetOrInputException(f\"Value(type:{type_}) are of different types with \"\n f\"previous values(type:{self.dtype}).\")\n if self._cell_ndim != dim_:\n raise AppendToTargetOrInputException(f\"Value(dim:{dim_}) are of different dimensions with \"\n f\"previous values(dim:{self._cell_ndim}).\")\n self.content.append(val)\n else:\n self.content.append(val)\n \n def pop(self, index):\n r\"\"\"\n 删除该field中index处的元素\n :param int index: 从0开始的数据下标。\n :return:\n \"\"\"\n self.content.pop(index)\n \n def __getitem__(self, indices):\n return self.get(indices, pad=False)\n \n def __setitem__(self, idx, val):\n assert isinstance(idx, int)\n if (self._is_target or self._is_input) and self.ignore_type is False: # 需要检测类型\n type_, dim_ = _get_ele_type_and_dim(val)\n if self.dtype != type_:\n raise RuntimeError(f\"Value(type:{type_}) are of different types with \"\n f\"other values(type:{self.dtype}).\")\n if self._cell_ndim != dim_:\n raise RuntimeError(f\"Value(dim:{dim_}) are of different dimensions with \"\n f\"previous values(dim:{self._cell_ndim}).\")\n self.content[idx] = val\n \n def get(self, indices, pad=True):\n r\"\"\"\n 根据给定的indices返回内容。\n\n :param int,List[int] indices: 获取indices对应的内容。\n :param bool pad: 是否对返回的结果进行padding。仅对: (1) indices为List[int]; (2)padder不为None; (3)field设置了input\n 或target,有效\n :return: 根据给定的indices返回的内容,可能是单个值或ndarray\n \"\"\"\n if isinstance(indices, int):\n return self.content[indices]\n\n contents = [self.content[i] for i in indices]\n if self.padder is None or pad is False:\n return np.array(contents)\n elif self.is_input or self.is_target:\n return self.pad(contents)\n else:\n return np.array(contents)\n \n def pad(self, contents):\n r\"\"\"\n 传入list的contents,将contents使用padder进行padding,contents必须为从本FieldArray中取出的。\n\n :param list contents:\n :return:\n \"\"\"\n return self.padder(contents, field_name=self.name, field_ele_dtype=self.dtype, dim=self._cell_ndim)\n \n def set_padder(self, padder):\n r\"\"\"\n 设置padder,在这个field进行pad的时候用这个padder进行pad,如果为None则不进行pad。\n\n :param padder: :class:`~fastNLP.Padder` 类型,设置为None即删除padder。\n \"\"\"\n if padder is not None:\n assert isinstance(padder, Padder), \"padder must be of type Padder.\"\n self.padder = deepcopy(padder)\n else:\n self.padder = None\n \n def set_pad_val(self, pad_val):\n r\"\"\"\n 修改padder的pad_val.\n\n :param int pad_val: 该field的pad值设置为该值。\n \"\"\"\n if self.padder is not None:\n self.padder.set_pad_val(pad_val)\n return self\n \n def __len__(self):\n r\"\"\"\n Returns the size of FieldArray.\n\n :return int length:\n \"\"\"\n return len(self.content)\n \n def to(self, other):\n r\"\"\"\n 将other的属性复制给本FieldArray(other必须为FieldArray类型).\n 属性包括 is_input, is_target, padder, ignore_type\n\n :param other: :class:`~fastNLP.FieldArray` 
从哪个field拷贝属性\n :return: :class:`~fastNLP.FieldArray`\n \"\"\"\n assert isinstance(other, FieldArray), \"Only supports fastNLP.FieldArray type, not {}.\".format(type(other))\n \n self.ignore_type = other.ignore_type\n self.is_input = other.is_input\n self.is_target = other.is_target\n self.padder = other.padder\n \n return self\n \n def split(self, sep: str = None, inplace: bool = True):\n r\"\"\"\n 依次对自身的元素使用.split()方法,应该只有当本field的元素为str时,该方法才有用。将返回值\n\n :param sep: 分割符,如果为None则直接调用str.split()。\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return: List[List[str]] or self\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n new_contents.append(cell.split(sep))\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n return self._after_process(new_contents, inplace=inplace)\n \n def int(self, inplace: bool = True):\n r\"\"\"\n 将本field中的值调用int(cell). 支持field中内容为以下两种情况(1)['1', '2', ...](即field中每个值为str的),\n (2) [['1', '2', ..], ['3', ..], ...](即field中每个值为一个list,list中的值会被依次转换。)\n\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return: List[int], List[List[int]], self\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n if isinstance(cell, list):\n new_contents.append([int(value) for value in cell])\n else:\n new_contents.append(int(cell))\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n return self._after_process(new_contents, inplace=inplace)\n \n def float(self, inplace=True):\n r\"\"\"\n 将本field中的值调用float(cell). 支持field中内容为以下两种情况(1)['1', '2', ...](即field中每个值为str的),\n (2) [['1', '2', ..], ['3', ..], ...](即field中每个值为一个list,list中的值会被依次转换。)\n\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return:\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n if isinstance(cell, list):\n new_contents.append([float(value) for value in cell])\n else:\n new_contents.append(float(cell))\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n return self._after_process(new_contents, inplace=inplace)\n \n def bool(self, inplace=True):\n r\"\"\"\n 将本field中的值调用bool(cell). 支持field中内容为以下两种情况(1)['1', '2', ...](即field中每个值为str的),\n (2) [['1', '2', ..], ['3', ..], ...](即field中每个值为一个list,list中的值会被依次转换。)\n\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return:\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n if isinstance(cell, list):\n new_contents.append([bool(value) for value in cell])\n else:\n new_contents.append(bool(cell))\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n \n return self._after_process(new_contents, inplace=inplace)\n \n def lower(self, inplace=True):\n r\"\"\"\n 将本field中的值调用cell.lower(). 
支持field中内容为以下两种情况(1)['1', '2', ...](即field中每个值为str的),\n (2) [['1', '2', ..], ['3', ..], ...](即field中每个值为一个list,list中的值会被依次转换。)\n\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return: List[int], List[List[int]], self\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n if isinstance(cell, list):\n new_contents.append([value.lower() for value in cell])\n else:\n new_contents.append(cell.lower())\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n return self._after_process(new_contents, inplace=inplace)\n \n def upper(self, inplace=True):\n r\"\"\"\n 将本field中的值调用cell.lower(). 支持field中内容为以下两种情况(1)['1', '2', ...](即field中每个值为str的),\n (2) [['1', '2', ..], ['3', ..], ...](即field中每个值为一个list,list中的值会被依次转换。)\n\n :param inplace: 如果为True,则将新生成值替换本field。否则返回list。\n :return: List[int], List[List[int]], self\n \"\"\"\n new_contents = []\n for index, cell in enumerate(self.content):\n try:\n if isinstance(cell, list):\n new_contents.append([value.upper() for value in cell])\n else:\n new_contents.append(cell.upper())\n except Exception as e:\n logger.error(f\"Exception happens when process value in index {index}.\")\n raise e\n return self._after_process(new_contents, inplace=inplace)\n \n def value_count(self):\n r\"\"\"\n 返回该field下不同value的数量。多用于统计label数量\n\n :return: Counter, key是label,value是出现次数\n \"\"\"\n count = Counter()\n \n def cum(cell):\n if _is_iterable(cell) and not isinstance(cell, str):\n for cell_ in cell:\n cum(cell_)\n else:\n count[cell] += 1\n \n for cell in self.content:\n cum(cell)\n return count\n \n def _after_process(self, new_contents, inplace):\n r\"\"\"\n 当调用处理函数之后,决定是否要替换field。\n\n :param new_contents:\n :param inplace:\n :return: self或者生成的content\n \"\"\"\n if inplace:\n self.content = new_contents\n try:\n self.is_input = self.is_input\n self.is_target = self.is_input\n except SetInputOrTargetException as e:\n logger.error(\"The newly generated field cannot be set as input or target.\")\n raise e\n return self\n else:\n return new_contents\n\n\ndef _get_ele_type_and_dim(cell: Any, dim=0):\n r\"\"\"\n 识别cell的类别与dimension的数量\n\n numpy scalar type:https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.scalars.html\n :param cell:\n :param dim:\n :return:\n \"\"\"\n if isinstance(cell, (str, Number, np.bool_)):\n if hasattr(cell, 'dtype'):\n return cell.dtype.type, dim\n return type(cell), dim\n elif isinstance(cell, list):\n dim += 1\n res = [_get_ele_type_and_dim(cell_i, dim) for cell_i in cell]\n types = set([i for i, j in res])\n dims = set([j for i, j in res])\n if len(types) > 1:\n raise SetInputOrTargetException(\"Mixed types detected: {}.\".format(list(types)))\n elif len(types) == 0:\n raise SetInputOrTargetException(\"Empty value encountered.\")\n if len(dims) > 1:\n raise SetInputOrTargetException(\"Mixed dimension detected: {}.\".format(list(dims)))\n return types.pop(), dims.pop()\n elif isinstance(cell, torch.Tensor):\n return cell.dtype, cell.dim() + dim # 如果是torch.mean的结果是0\n elif isinstance(cell, np.ndarray):\n if cell.dtype != np.dtype('O'): # 如果不是object的话说明是well-formatted的了\n return cell.dtype.type, cell.ndim + dim # dtype.type返回的会是np.int32, np.float等\n # 否则需要继续往下iterate\n dim += 1\n res = [_get_ele_type_and_dim(cell_i, dim) for cell_i in cell]\n types = set([i for i, j in res])\n dims = set([j for i, j in res])\n if len(types) > 1:\n raise SetInputOrTargetException(\"Mixed types detected: {}.\".format(list(types)))\n elif len(types) == 0:\n raise 
SetInputOrTargetException(\"Empty value encountered.\")\n if len(dims) > 1:\n raise SetInputOrTargetException(\"Mixed dimension detected: {}.\".format(list(dims)))\n return types.pop(), dims.pop()\n else: # 包含tuple, set, dict以及其它的类型\n raise SetInputOrTargetException(f\"Cannot process type:{type(cell)}.\")\n\n\nclass Padder:\n r\"\"\"\n 所有padder都需要继承这个类,并覆盖__call__方法。\n 用于对batch进行padding操作。传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前deepcopy一份。\n\n .. py:function:: __call__(self, contents, field_name, field_ele_dtype):\n \n \"\"\"\n \n def __init__(self, pad_val=0, **kwargs):\n r\"\"\"\n \n :param List[Any] contents: 传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前\n deepcopy一份。\n :param str, field_name: field的名称。\n :param np.int64,np.float64,np.str,None, field_ele_dtype: 该field的内层元素的类型。如果该field的ignore_type为True,该这个值为None。\n :return: np.array([padded_element])\n \"\"\"\n self.pad_val = pad_val\n \n def set_pad_val(self, pad_val):\n self.pad_val = pad_val\n\n def get_pad_val(self):\n return self.pad_val\n\n @abstractmethod\n def __call__(self, contents, field_name, field_ele_dtype, dim: int):\n r\"\"\"\n 传入的是List内容。假设有以下的DataSet。\n\n :param List[Any] contents: 传入的element是inplace的,即直接修改element可能导致数据变化,建议inplace修改之前\n deepcopy一份。\n :param str, field_name: field的名称。\n :param np.int64,np.float64,np.str,None, field_ele_dtype: 该field的内层元素的类型。如果该field的ignore_type为True,\n 该这个值为None。\n :param dim: 这个field的维度。当ignore_type为True时,该值为None\n :return: np.array([padded_element])\n\n Example::\n\n from fastNLP import DataSet\n from fastNLP import Instance\n dataset = DataSet()\n dataset.append(Instance(sent='this is a demo', length=4,\n chars=[['t', 'h', 'i', 's'], ['i', 's'], ['a'], ['d', 'e', 'm', 'o']]))\n dataset.append(Instance(sent='another one', length=2,\n chars=[['a', 'n', 'o', 't', 'h', 'e', 'r'], ['o', 'n', 'e']]))\n 如果调用\n batch = dataset.get([0,1], pad=True)\n sent这个field的padder的__call__会接收到的内容会是\n [\n 'this is a demo',\n 'another one'\n ]\n\n length这个field的padder的__call__会接收到的内容会是\n [4, 2]\n\n chars这个field的padder的__call__会接收到的内容会是\n [\n [['t', 'h', 'i', 's'], ['i', 's'], ['a'], ['d', 'e', 'm', 'o']],\n [['a', 'n', 'o', 't', 'h', 'e', 'r'], ['o', 'n', 'e']]\n ]\n\n 即把每个instance中某个field的内容合成一个List传入\n\n \"\"\"\n raise NotImplementedError\n\n\nclass AutoPadder(Padder):\n r\"\"\"\n 根据contents的数据自动判定是否需要做padding。\n\n 1 如果元素类型(元素类型是指field中最里层元素的数据类型, 可以通过FieldArray.dtype查看,比如['This', 'is', ...]的元素类\n 型为str, [[1,2], ...]的元素类型为int)的数据不为数值类型则不会进行pad\n\n 2 如果元素类型为数值类型,比如np.int64, np.float64, int, float, torch.int64等\n\n 2.1 如果该field的内容为数值类型(包括int, float等),比如为seq_len, 则不进行padding\n\n 2.2 如果该field的内容等价于一维list, 那么会将Batch中的List pad为一样长。\n\n 2.3 如果该field的内容等价于二维list,那么会按照英语character padding的方式进行padding。如果是character padding建议使用\n :class: fastNLP.EngChar2DPadder.\n\n 2.4 如果该field的内容等价于三维list,则如果每个instance在每个维度上相等,会组成一个batch的tensor返回,这种情况应该是为图片\n 的情况。\n\n 3 其它情况不进行处理,返回一个np.array类型。\n \"\"\"\n \n def __init__(self, pad_val=0):\n super().__init__(pad_val=pad_val)\n \n def __call__(self, contents, field_name, field_ele_dtype, dim):\n if field_ele_dtype:\n if dim > 3:\n return np.array(contents)\n if isinstance(field_ele_dtype, type) and \\\n (issubclass(field_ele_dtype, np.number) or issubclass(field_ele_dtype, Number)):\n if dim == 0:\n array = np.array(contents, dtype=field_ele_dtype)\n elif dim == 1:\n max_len = max(map(len, contents))\n array = np.full((len(contents), max_len), self.pad_val, dtype=field_ele_dtype)\n for i, content_i in enumerate(contents):\n array[i, :len(content_i)] = 
content_i\n elif dim == 2:\n max_len = max(map(len, contents))\n max_word_len = max([max([len(content_ii) for content_ii in content_i]) for\n content_i in contents])\n array = np.full((len(contents), max_len, max_word_len), self.pad_val, dtype=field_ele_dtype)\n for i, content_i in enumerate(contents):\n for j, content_ii in enumerate(content_i):\n array[i, j, :len(content_ii)] = content_ii\n else:\n shape = np.shape(contents)\n if len(shape) == 4: # 说明各dimension是相同的大小\n array = np.array(contents, dtype=field_ele_dtype)\n else:\n raise RuntimeError(\n f\"Field:{field_name} has 3 dimensions, every sample should have the same shape.\")\n return array\n elif str(field_ele_dtype).startswith('torch'):\n if dim == 0:\n tensor = torch.tensor(contents).to(field_ele_dtype)\n elif dim == 1:\n max_len = max(map(len, contents))\n tensor = torch.full((len(contents), max_len), fill_value=self.pad_val, dtype=field_ele_dtype)\n for i, content_i in enumerate(contents):\n tensor[i, :len(content_i)] = content_i.clone().detach()\n elif dim == 2:\n max_len = max(map(len, contents))\n max_word_len = max([max([len(content_ii) for content_ii in content_i]) for\n content_i in contents])\n tensor = torch.full((len(contents), max_len, max_word_len), fill_value=self.pad_val,\n dtype=field_ele_dtype)\n for i, content_i in enumerate(contents):\n for j, content_ii in enumerate(content_i):\n tensor[i, j, :len(content_ii)] = content_ii.clone().detach()\n else:\n shapes = set([np.shape(content_i) for content_i in contents])\n if len(shapes) > 1:\n raise RuntimeError(\n f\"Field:{field_name} has 3 dimensions, every sample should have the same shape.\")\n shape = shapes.pop()\n if len(shape) == 3:\n tensor = torch.full([len(contents)] + list(shape), fill_value=self.pad_val,\n dtype=field_ele_dtype)\n for i, content_i in enumerate(contents):\n tensor[i] = content_i.clone().detach().to(field_ele_dtype)\n else:\n raise RuntimeError(\n f\"Field:{field_name} has 3 dimensions, every sample should have the same shape.\")\n return tensor\n else:\n return np.array(contents) # 不进行任何操作\n else:\n return np.array(contents)\n\n\nclass EngChar2DPadder(Padder):\n r\"\"\"\n 用于为英语执行character级别的2D padding操作。对应的field内容应该类似[['T', 'h', 'i', 's'], ['a'], ['d', 'e', 'm', 'o']],\n 但这个Padder只能处理index为int的情况。\n\n padded过后的batch内容,形状为(batch_size, max_sentence_length, max_word_length). 
max_sentence_length为这个batch中最大句\n 子长度;max_word_length为这个batch中最长的word的长度::\n\n from fastNLP import DataSet\n from fastNLP import EngChar2DPadder\n from fastNLP import Vocabulary\n dataset = DataSet({'sent': ['This is the first demo', 'This is the second demo']})\n dataset.apply(lambda ins:[list(word) for word in ins['sent'].split()], new_field_name='chars')\n vocab = Vocabulary()\n vocab.from_dataset(dataset, field_name='chars')\n vocab.index_dataset(dataset, field_name='chars')\n dataset.set_input('chars')\n padder = EngChar2DPadder()\n dataset.set_padder('chars', padder) # chars这个field的设置为了EnChar2DPadder\n\n \"\"\"\n \n def __init__(self, pad_val=0, pad_length=0):\n r\"\"\"\n :param pad_val: int, pad的位置使用该index\n :param pad_length: int, 如果为0则取一个batch中最大的单词长度作为padding长度。如果为大于0的数,则将所有单词的长度\n 都pad或截取到该长度.\n \"\"\"\n super().__init__(pad_val=pad_val)\n \n self.pad_length = pad_length\n \n def __call__(self, contents, field_name, field_ele_dtype, dim):\n r\"\"\"\n 期望输入类似于\n [\n [[0, 2], [2, 3, 4], ..],\n [[9, 8, 2, 4], [1, 2,], ...],\n ....\n ]\n\n :param contents:\n :param field_name:\n :param field_ele_dtype\n :return:\n \"\"\"\n if field_ele_dtype not in (np.int64, np.float64, int, float):\n raise TypeError('dtype of Field:{} should be np.int64 or np.float64 to do 2D padding, get {}.'.format(\n field_name, field_ele_dtype\n ))\n assert dim == 2, f\"Field:{field_name} has {dim}, EngChar2DPadder only supports input with 2 dimensions.\"\n if self.pad_length < 1:\n max_char_length = max([max(len(char_lst) for char_lst in word_lst) for word_lst in contents])\n else:\n max_char_length = self.pad_length\n max_sent_length = max(len(word_lst) for word_lst in contents)\n batch_size = len(contents)\n dtype = type(contents[0][0][0])\n \n padded_array = np.full((batch_size, max_sent_length, max_char_length), fill_value=self.pad_val,\n dtype=dtype)\n for b_idx, word_lst in enumerate(contents):\n for c_idx, char_lst in enumerate(word_lst):\n chars = char_lst[:max_char_length]\n padded_array[b_idx, c_idx, :len(chars)] = chars\n \n return padded_array\n",
"import unittest\n\nimport torch\n\nfrom fastNLP.modules.encoder.seq2seq_encoder import TransformerSeq2SeqEncoder, LSTMSeq2SeqEncoder\nfrom fastNLP import Vocabulary\nfrom fastNLP.embeddings import StaticEmbedding\n\n\nclass TestTransformerSeq2SeqEncoder(unittest.TestCase):\n def test_case(self):\n vocab = Vocabulary().add_word_lst(\"This is a test .\".split())\n embed = StaticEmbedding(vocab, embedding_dim=5)\n encoder = TransformerSeq2SeqEncoder(embed, num_layers=2, d_model=10, n_head=2)\n words_idx = torch.LongTensor([0, 1, 2]).unsqueeze(0)\n seq_len = torch.LongTensor([3])\n encoder_output, encoder_mask = encoder(words_idx, seq_len)\n self.assertEqual(encoder_output.size(), (1, 3, 10))\n\n\nclass TestBiLSTMEncoder(unittest.TestCase):\n def test_case(self):\n vocab = Vocabulary().add_word_lst(\"This is a test .\".split())\n embed = StaticEmbedding(vocab, embedding_dim=5)\n encoder = LSTMSeq2SeqEncoder(embed, hidden_size=5, num_layers=1)\n words_idx = torch.LongTensor([0, 1, 2]).unsqueeze(0)\n seq_len = torch.LongTensor([3])\n\n encoder_output, encoder_mask = encoder(words_idx, seq_len)\n self.assertEqual(encoder_mask.size(), (1, 3))\n",
"from fastNLP.core.losses import LossBase\n\nfrom reproduction.coreference_resolution.model.preprocess import get_labels\nfrom reproduction.coreference_resolution.model.config import Config\nimport torch\n\n\nclass SoftmaxLoss(LossBase):\n \"\"\"\n 交叉熵loss\n 允许多标签分类\n \"\"\"\n\n def __init__(self, antecedent_scores=None, target=None, mention_start_tensor=None, mention_end_tensor=None):\n \"\"\"\n\n :param pred:\n :param target:\n \"\"\"\n super().__init__()\n self._init_param_map(antecedent_scores=antecedent_scores, target=target,\n mention_start_tensor=mention_start_tensor, mention_end_tensor=mention_end_tensor)\n\n def get_loss(self, antecedent_scores, target, mention_start_tensor, mention_end_tensor):\n antecedent_labels = get_labels(target[0], mention_start_tensor, mention_end_tensor,\n Config().max_antecedents)\n\n antecedent_labels = torch.from_numpy(antecedent_labels*1).to(torch.device(\"cuda:\" + Config().cuda))\n gold_scores = antecedent_scores + torch.log(antecedent_labels.float()).to(torch.device(\"cuda:\" + Config().cuda)) # [num_mentions, max_ant + 1]\n marginalized_gold_scores = gold_scores.logsumexp(dim=1) # [num_mentions]\n log_norm = antecedent_scores.logsumexp(dim=1) # [num_mentions]\n return torch.sum(log_norm - marginalized_gold_scores)\n"
] | [
[
"scipy.io.wavfile.write",
"torch.cuda.is_available"
],
[
"torch.jit.trace",
"torch.cuda.current_device",
"torch.cuda.current_stream",
"torch.rand",
"torch.cuda.is_available",
"torch.stack"
],
[
"torch.randn",
"torch.nn.LSTM",
"torch.autograd.Variable"
],
[
"numpy.dtype",
"torch.tensor",
"numpy.full",
"numpy.shape",
"numpy.array"
],
[
"torch.LongTensor"
],
[
"torch.sum",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
flyingwjw/Documentation | [
"567608f388ca369b864c2d75a94647801b5dfa1e",
"567608f388ca369b864c2d75a94647801b5dfa1e",
"567608f388ca369b864c2d75a94647801b5dfa1e"
] | [
"python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 03/ch03-rec12-errorbar.py",
"python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 04/ch04_rec08_fill_under.py",
"python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 03/ch03-rec15-stacked-plot.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n# generate measures from gaussian distribution \nx = np.arange(0, 10, 1)\n\n# values computed from \"measured\"\ny = np.log(x)\n\n# add some error samples from standard normal distribution \nxe = 0.1 * np.abs(np.random.randn(len(y)))\n\n# draw and show errorbar\nplt.bar(x, y, yerr=xe, width=0.4, align='center', ecolor='r', color='cyan',\n label='experiment #1');\n\n# give some explainations\nplt.xlabel('# measurement')\nplt.ylabel('Measured values')\nplt.title('Measurements')\nplt.legend(loc='upper left')\n\nplt.show()\n",
"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(0.0, 2, 0.01)\ny1 = np.sin(np.pi*x)\ny2 = 1.7*np.sin(4*np.pi*x)\n\nfig = plt.figure()\naxes1 = fig.add_subplot(211)\naxes1.plot(x, y1, x, y2, color='grey')\naxes1.fill_between(x, y1, y2, where=y2<=y1, facecolor='blue', interpolate=True)\naxes1.fill_between(x, y1, y2, where=y2>=y1, facecolor='gold', interpolate=True)\naxes1.set_title('Blue where y2 <= y1. Gold-color where y2 >= y1.')\naxes1.set_ylim(-2,2)\n\n# Mask values in y2 with value greater than 1.0\ny2 = np.ma.masked_greater(y2, 1.0)\naxes2 = fig.add_subplot(212, sharex=axes1)\naxes2.plot(x, y1, x, y2, color='black')\naxes2.fill_between(x, y1, y2, where=y2<=y1, facecolor='blue', interpolate=True)\naxes2.fill_between(x, y1, y2, where=y2>=y1, facecolor='gold', interpolate=True)\naxes2.set_title('Same as above, but mask')\naxes2.set_ylim(-2,2)\naxes2.grid('on')\n\nplt.show()",
"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# We load the data with pandas.\ndf = pd.read_csv('ch03-energy-production.csv')\n\n# We give names for the columns that we want to load. Different types of energy have been ordrered by total production values).\ncolumns = ['Coal', 'Natural Gas (Dry)', 'Crude Oil', 'Nuclear Electric Power',\n 'Biomass Energy', 'Hydroelectric Power', 'Natural Gas Plant Liquids',\n 'Wind Energy', 'Geothermal Energy', 'Solar/PV Energy']\n\n# We define some specific colors to plot each type of energy produced.\ncolors = ['darkslategray', 'powderblue', 'darkmagenta', 'lightgreen', 'sienna',\n'royalblue', 'mistyrose', 'lavender', 'tomato', 'gold']\n\n# Let's create the figure.\nplt.figure(figsize = (12,8))\npolys = plt.stackplot(df['Year'], df[columns].values.T, colors = colors)\n\n# The legend is not yet supported with stackplot. We will add it manually.\nrectangles= []\nfor poly in polys:\n rectangles.append(plt.Rectangle((0, 0), 1, 1, fc=poly.get_facecolor()[0]))\nlegend = plt.legend(rectangles, columns, loc = 3)\nframe = legend.get_frame()\nframe.set_color('white')\n\n# We add some information to the plot.\nplt.title('Primary Energy Production by Source', fontsize = 16)\nplt.xlabel('Year', fontsize = 16)\nplt.ylabel('Production (Quad BTU)', fontsize = 16)\nplt.xticks(fontsize = 16)\nplt.yticks(fontsize = 16)\nplt.xlim(1973,2014)\n\n# Finally we show the figure.\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.log",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.arange",
"numpy.ma.masked_greater",
"numpy.sin",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.stackplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
flysky2008/autogluon | [
"7ad9e5601cf17e616950ae7ef2e84d77b04832e4"
] | [
"autogluon/utils/tabular/ml/trainer/abstract_trainer.py"
] | [
"import copy, time, traceback, logging\nimport os\nfrom typing import List\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom collections import defaultdict\n\nfrom ..constants import BINARY, MULTICLASS, REGRESSION\nfrom ...utils.loaders import load_pkl\nfrom ...utils.savers import save_pkl\nfrom ...utils.exceptions import TimeLimitExceeded, NotEnoughMemoryError\nfrom ..utils import get_pred_from_proba, dd_list, generate_train_test_split\nfrom ..models.abstract.abstract_model import AbstractModel\nfrom ...metrics import accuracy, log_loss, root_mean_squared_error, scorer_expects_y_pred\nfrom ..models.ensemble.bagged_ensemble_model import BaggedEnsembleModel\nfrom ..trainer.model_presets.presets import get_preset_stacker_model\nfrom ..models.ensemble.stacker_ensemble_model import StackerEnsembleModel\nfrom ..models.ensemble.weighted_ensemble_model import WeightedEnsembleModel\nfrom ..trainer.model_presets.presets_distill import get_preset_models_distillation\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Try to optimize for log loss at level 0 for stacking, only optimize for objective func at later levels or in aux models. Might work better.\n# FIXME: Below is major defect!\n# Weird interaction for metrics like AUC during bagging.\n# If kfold = 5, scores are 0.9, 0.85, 0.8, 0.75, and 0.7, the score is not 0.8! It is much lower because probs are combined together and AUC is recalculated\n# Do we want this to happen? Should we calculate score by 5 separate scores and then averaging instead?\n\n# TODO: Add post-fit cleanup function which loads all models and saves them after removing unnecessary variables such as oof_pred_probas to optimize load times and space usage\n# Trainer will not be able to be fit further after this operation is done, but it will be able to predict.\n# TODO: Dynamic model loading for ensemble models during prediction, only load more models if prediction is uncertain. This dynamically reduces inference time.\n# TODO: Try midstack Semi-Supervised. Just take final models and re-train them, use bagged preds for SS rows. 
This would be very cheap and easy to try.\nclass AbstractTrainer:\n trainer_file_name = 'trainer.pkl'\n\n def __init__(self, path: str, problem_type: str, scheduler_options=None, objective_func=None, stopping_metric=None,\n num_classes=None, low_memory=False, feature_types_metadata={}, kfolds=0, n_repeats=1,\n stack_ensemble_levels=0, time_limit=None, save_data=False, verbosity=2):\n self.path = path\n self.problem_type = problem_type\n self.feature_types_metadata = feature_types_metadata\n self.save_data = save_data\n self.verbosity = verbosity\n if objective_func is not None:\n self.objective_func = objective_func\n elif self.problem_type == BINARY:\n self.objective_func = accuracy\n elif self.problem_type == MULTICLASS:\n self.objective_func = accuracy\n else:\n self.objective_func = root_mean_squared_error\n\n # stopping_metric is used to early stop all models except for aux models.\n if stopping_metric is not None:\n self.stopping_metric = stopping_metric\n elif self.objective_func.name == 'roc_auc':\n self.stopping_metric = log_loss\n else:\n self.stopping_metric = self.objective_func\n\n self.objective_func_expects_y_pred = scorer_expects_y_pred(scorer=self.objective_func)\n logger.log(25, \"AutoGluon will gauge predictive performance using evaluation metric: %s\" % self.objective_func.name)\n if not self.objective_func_expects_y_pred:\n logger.log(25, \"This metric expects predicted probabilities rather than predicted class labels, so you'll need to use predict_proba() instead of predict()\")\n\n logger.log(20, \"To change this, specify the eval_metric argument of fit()\")\n logger.log(25, \"AutoGluon will early stop models using evaluation metric: %s\" % self.stopping_metric.name) # TODO: stopping_metric is likely not used during HPO, fix this\n self.num_classes = num_classes\n self.feature_prune = False # will be set to True if feature-pruning is turned on.\n self.low_memory = low_memory\n self.bagged_mode = True if kfolds >= 2 else False\n if self.bagged_mode:\n self.kfolds = kfolds # int number of folds to do model bagging, < 2 means disabled\n self.stack_ensemble_levels = stack_ensemble_levels\n self.stack_mode = True if self.stack_ensemble_levels >= 1 else False\n self.n_repeats = n_repeats\n else:\n self.kfolds = 0\n self.stack_ensemble_levels = 0\n self.stack_mode = False\n self.n_repeats = 1\n\n self.hyperparameters = {} # TODO: This is currently required for fetching stacking layer models. 
Consider incorporating more elegantly\n\n # self.models_level_all['core'][0] # Includes base models\n # self.models_level_all['core'][1] # Stacker level 1\n # self.models_level_all['aux1'][1] # Stacker level 1 aux models, such as weighted_ensemble\n # self.models_level_all['core'][2] # Stacker level 2\n self.models_level = defaultdict(dd_list)\n self.models_level_hpo = defaultdict(dd_list) # stores additional models produced during HPO\n\n self.model_best = None\n self.model_best_core = None\n\n self.model_performance = {}\n self.model_paths = {}\n self.model_types = {} # Outer type, can be BaggedEnsemble, StackEnsemble (Type that is able to load the model)\n self.model_types_inner = {} # Inner type, if Ensemble then it is the type of the inner model (May not be able to load with this type)\n self.model_fit_times = {}\n self.model_pred_times = {}\n self.models = {}\n self.reset_paths = False\n\n self.hpo_results = {} # Stores summary of HPO process\n # Scheduler attributes:\n if scheduler_options is not None:\n self.scheduler_func = scheduler_options[0] # unpack tuple\n self.scheduler_options = scheduler_options[1]\n else:\n self.scheduler_func = None\n self.scheduler_options = None\n\n self.time_limit = time_limit\n if self.time_limit is None:\n self.time_limit = 1e7\n self.ignore_time_limit = True\n else:\n self.ignore_time_limit = False\n self.time_train_start = None\n self.time_train_level_start = None\n self.time_limit_per_level = self.time_limit / (self.stack_ensemble_levels + 1)\n\n self.num_rows_train = None\n self.num_cols_train = None\n\n self.is_data_saved = False\n\n # path_root is the directory containing learner.pkl\n @property\n def path_root(self):\n return self.path.rsplit(os.path.sep, maxsplit=2)[0] + os.path.sep\n\n @property\n def path_utils(self):\n return self.path_root + 'utils' + os.path.sep\n\n @property\n def path_data(self):\n return self.path_utils + 'data' + os.path.sep\n\n def load_X_train(self):\n path = self.path_data + 'X_train.pkl'\n return load_pkl.load(path=path)\n\n def load_X_val(self):\n path = self.path_data + 'X_val.pkl'\n return load_pkl.load(path=path)\n\n def load_y_train(self):\n path = self.path_data + 'y_train.pkl'\n return load_pkl.load(path=path)\n\n def load_y_val(self):\n path = self.path_data + 'y_val.pkl'\n return load_pkl.load(path=path)\n\n def save_X_train(self, X, verbose=True):\n path = self.path_data + 'X_train.pkl'\n save_pkl.save(path=path, object=X, verbose=verbose)\n\n def save_X_val(self, X, verbose=True):\n path = self.path_data + 'X_val.pkl'\n save_pkl.save(path=path, object=X, verbose=verbose)\n\n def save_y_train(self, y, verbose=True):\n path = self.path_data + 'y_train.pkl'\n save_pkl.save(path=path, object=y, verbose=verbose)\n\n def save_y_val(self, y, verbose=True):\n path = self.path_data + 'y_val.pkl'\n save_pkl.save(path=path, object=y, verbose=verbose)\n\n def get_model_names_all(self):\n model_names = []\n for stack_name in self.models_level.keys():\n model_names += self.get_model_names(stack_name)\n return model_names\n\n def get_model_names(self, stack_name):\n model_names = []\n levels = np.sort(list(self.models_level[stack_name].keys()))\n for level in levels:\n model_names += self.models_level[stack_name][level]\n return model_names\n\n def get_max_level(self, stack_name: str):\n try:\n return np.sort(list(self.models_level[stack_name].keys()))[-1]\n except IndexError:\n return -1\n\n def get_max_level_all(self):\n max_level = 0\n for stack_name in self.models_level.keys():\n max_level = max(max_level, 
self.get_max_level(stack_name))\n return max_level\n\n def get_models(self, hyperparameters, hyperparameter_tune=False, **kwargs):\n raise NotImplementedError\n\n def get_model_level(self, model_name):\n for stack_name in self.models_level.keys():\n for level in self.models_level[stack_name].keys():\n if model_name in self.models_level[stack_name][level]:\n return level\n raise ValueError('Model ' + str(model_name) + ' does not exist in trainer.')\n\n def set_contexts(self, path_context):\n self.path, self.model_paths = self.create_contexts(path_context)\n\n def create_contexts(self, path_context):\n path = path_context\n model_paths = copy.deepcopy(self.model_paths)\n for model in self.model_paths:\n prev_path = self.model_paths[model]\n model_local_path = prev_path.split(self.path, 1)[1]\n new_path = path + model_local_path\n model_paths[model] = new_path\n\n return path, model_paths\n\n def train(self, X_train, y_train, X_test=None, y_test=None, hyperparameter_tune=True, feature_prune=False, holdout_frac=0.1, hyperparameters=None):\n raise NotImplementedError\n\n def train_single(self, X_train, y_train, X_test, y_test, model, kfolds=None, k_fold_start=0, k_fold_end=None, n_repeats=None, n_repeat_start=0, level=0, time_limit=None):\n if kfolds is None:\n kfolds = self.kfolds\n if n_repeats is None:\n n_repeats = self.n_repeats\n if model.feature_types_metadata is None:\n model.feature_types_metadata = self.feature_types_metadata # TODO: move this into model creation process?\n model_fit_kwargs = {}\n if self.scheduler_options is not None:\n model_fit_kwargs = {'verbosity': self.verbosity, \n 'num_cpus': self.scheduler_options['resource']['num_cpus'],\n 'num_gpus': self.scheduler_options['resource']['num_gpus']} # Additional configurations for model.fit\n if self.bagged_mode or isinstance(model, WeightedEnsembleModel):\n model.fit(X=X_train, y=y_train, k_fold=kfolds, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start, compute_base_preds=False, time_limit=time_limit, **model_fit_kwargs)\n else:\n model.fit(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, time_limit=time_limit, **model_fit_kwargs)\n return model\n\n def train_and_save(self, X_train, y_train, X_test, y_test, model: AbstractModel, stack_name='core', kfolds=None, k_fold_start=0, k_fold_end=None, n_repeats=None, n_repeat_start=0, level=0, time_limit=None):\n fit_start_time = time.time()\n model_names_trained = []\n try:\n if time_limit is not None:\n if time_limit <= 0:\n logging.log(15, 'Skipping ' + str(model.name) + ' due to lack of time remaining.')\n return model_names_trained\n time_left_total = self.time_limit - (fit_start_time - self.time_train_start)\n logging.log(20, 'Fitting model: ' + str(model.name) + ' ...'
+ ' Training model for up to ' + str(round(time_limit, 2)) + 's of the ' + str(round(time_left_total, 2)) + 's of remaining time.')\n else:\n logging.log(20, 'Fitting model: ' + str(model.name) + ' ...')\n model = self.train_single(X_train, y_train, X_test, y_test, model, kfolds=kfolds, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start, level=level, time_limit=time_limit)\n fit_end_time = time.time()\n if isinstance(model, BaggedEnsembleModel):\n if model.bagged_mode or isinstance(model, WeightedEnsembleModel):\n score = model.score_with_oof(y=y_train)\n else:\n score = np.nan\n else:\n score = model.score(X=X_test, y=y_test)\n pred_end_time = time.time()\n fit_time = fit_end_time - fit_start_time\n if np.isnan(score):\n pred_time = np.nan\n else:\n pred_time = pred_end_time - fit_end_time\n self.save_model(model=model)\n except TimeLimitExceeded as err:\n logger.log(20, '\\tTime limit exceeded... Skipping ' + model.name + '.')\n # logger.log(20, '\\tTime wasted: ' + str(time.time() - fit_start_time))\n del model\n except NotEnoughMemoryError as err:\n logger.warning('\\tNot enough memory to train model... Skipping ' + model.name + '.')\n del model\n except Exception as err:\n if self.verbosity >= 1:\n traceback.print_tb(err.__traceback__)\n logger.exception('Warning: Exception caused ' +str(model.name)+' to fail during training... Skipping this model.')\n logger.log(20, err)\n del model\n else:\n self.add_model(model=model, stack_name=stack_name, level=level, score=score, fit_time=fit_time, pred_time=pred_time, n_repeat_start=n_repeat_start)\n model_names_trained.append(model.name)\n if self.low_memory:\n del model\n return model_names_trained\n\n def add_model(self, model, stack_name, level, score, fit_time, pred_time, n_repeat_start=0):\n stack_loc = self.models_level[stack_name] # TODO: Consider removing, have train_multi handle this\n self.model_performance[model.name] = score\n self.model_paths[model.name] = model.path\n self.model_types[model.name] = type(model)\n if isinstance(model, BaggedEnsembleModel):\n self.model_types_inner[model.name] = model._child_type\n else:\n self.model_types_inner[model.name] = type(model)\n if not np.isnan(score):\n logger.log(20, '\\t' + str(round(score, 4)) + '\\t = Validation ' + self.objective_func.name + ' score')\n if not np.isnan(fit_time):\n logger.log(20, '\\t' + str(round(fit_time, 2)) + 's' + '\\t = Training runtime')\n if not np.isnan(pred_time):\n logger.log(20, '\\t' + str(round(pred_time, 2)) + 's' + '\\t = Validation runtime')\n # TODO: Should model have fit-time/pred-time information?\n # TODO: Add to HPO\n if n_repeat_start > 0:\n self.model_fit_times[model.name] += fit_time\n self.model_pred_times[model.name] += pred_time\n else:\n self.model_fit_times[model.name] = fit_time\n self.model_pred_times[model.name] = pred_time\n if model.is_valid():\n if model.name not in stack_loc[level]:\n stack_loc[level].append(model.name)\n if self.model_best_core is None:\n self.model_best_core = model.name\n else:\n best_score = self.model_performance[self.model_best_core]\n cur_score = self.model_performance[model.name]\n if cur_score > best_score:\n # new best core model\n self.model_best_core = model.name\n if self.low_memory:\n del model\n\n def train_single_full(self, X_train, y_train, X_test, y_test, model: AbstractModel, feature_prune=False, \n hyperparameter_tune=True, stack_name='core', kfolds=None, k_fold_start=0, k_fold_end=None, n_repeats=None, n_repeat_start=0, level=0, 
time_limit=None):\n if (n_repeat_start == 0) and (k_fold_start == 0):\n model.feature_types_metadata = self.feature_types_metadata # TODO: Don't set feature_types_metadata here\n if feature_prune:\n if n_repeat_start != 0:\n raise ValueError('n_repeat_start must be 0 to feature_prune, value = ' + str(n_repeat_start))\n elif k_fold_start != 0:\n raise ValueError('k_fold_start must be 0 to feature_prune, value = ' + str(k_fold_start))\n self.autotune(X_train=X_train, X_holdout=X_test, y_train=y_train, y_holdout=y_test, model_base=model) # TODO: Update to use CV instead of holdout\n if hyperparameter_tune:\n if self.scheduler_func is None or self.scheduler_options is None:\n raise ValueError(\"scheduler_options cannot be None when hyperparameter_tune = True\")\n if n_repeat_start != 0:\n raise ValueError('n_repeat_start must be 0 to hyperparameter_tune, value = ' + str(n_repeat_start))\n elif k_fold_start != 0:\n raise ValueError('k_fold_start must be 0 to hyperparameter_tune, value = ' + str(k_fold_start))\n # hpo_models (dict): keys = model_names, values = model_paths\n try: # TODO: Make exception handling more robust? Return successful HPO models?\n if isinstance(model, BaggedEnsembleModel):\n hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X=X_train, y=y_train, k_fold=kfolds, scheduler_options=(self.scheduler_func, self.scheduler_options), verbosity=self.verbosity)\n else:\n if (X_test is None) or (y_test is None):\n X_train, X_test, y_train, y_test = generate_train_test_split(X_train, y_train, problem_type=self.problem_type, test_size=0.2) # TODO: Adjust test_size, perhaps user specified?\n hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X_train=X_train, X_test=X_test,\n Y_train=y_train, Y_test=y_test, scheduler_options=(self.scheduler_func, self.scheduler_options), verbosity=self.verbosity)\n except Exception as err:\n if self.verbosity >= 1:\n traceback.print_tb(err.__traceback__)\n logger.exception('Warning: Exception caused ' + model.name + ' to fail during hyperparameter tuning... Skipping this model.')\n logger.debug(err)\n del model\n model_names_trained = []\n else:\n model_names_trained = list(sorted(hpo_models.keys()))\n self.models_level_hpo[stack_name][level] += model_names_trained\n self.model_paths.update(hpo_models)\n self.model_performance.update(hpo_model_performances)\n self.hpo_results[model.name] = hpo_results\n self.model_types.update({name: type(model) for name in model_names_trained})\n if isinstance(model, BaggedEnsembleModel):\n self.model_types_inner.update({name: model._child_type for name in model_names_trained})\n else:\n self.model_types_inner.update({name: type(model) for name in model_names_trained})\n else:\n model_names_trained = self.train_and_save(X_train, y_train, X_test, y_test, model, stack_name=stack_name, kfolds=kfolds, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start, level=level, time_limit=time_limit)\n self.save()\n return model_names_trained\n\n # TODO: How to deal with models that fail during this? They have trained valid models before, but should we still use those models or remove the entire model? 
Currently we still use models.\n # TODO: Time allowance can be made better by only using time taken during final model training and not during HPO and feature pruning.\n # TODO: Time allowance not accurate if running from fit_continue\n # Takes trained bagged ensemble models and fits additional k-fold bags.\n def train_multi_repeats(self, X_train, y_train, X_test, y_test, models, kfolds, n_repeats, n_repeat_start=1, stack_name='core', level=0, time_limit=None):\n models_valid = models\n models_valid_next = []\n repeats_completed = 0\n time_start = time.time()\n for n in range(n_repeat_start, n_repeats):\n if time_limit is not None:\n time_start_repeat = time.time()\n time_left = time_limit - (time_start_repeat - time_start)\n if n == n_repeat_start:\n time_required = self.time_limit_per_level * 0.575 # Require slightly over 50% to be safe\n else:\n time_required = (time_start_repeat - time_start) / repeats_completed * (0.575/0.425)\n if time_left < time_required:\n logger.log(15, 'Not enough time left to finish repeated k-fold bagging, stopping early ...')\n break\n logger.log(20, 'Repeating k-fold bagging: ' + str(n+1) + '/' + str(n_repeats))\n for i, model in enumerate(models_valid):\n if isinstance(model, str):\n model = self.load_model(model)\n if time_limit is None:\n time_left = None\n else:\n time_start_model = time.time()\n time_left = time_limit - (time_start_model - time_start)\n models_valid_next += self.train_single_full(X_train, y_train, X_test, y_test, model, hyperparameter_tune=False, feature_prune=False, stack_name=stack_name, kfolds=kfolds, k_fold_start=0, k_fold_end=None, n_repeats=n+1, n_repeat_start=n, level=level, time_limit=time_left)\n models_valid = copy.deepcopy(models_valid_next)\n models_valid_next = []\n repeats_completed += 1\n logger.log(20, 'Completed ' + str(n_repeat_start + repeats_completed) + '/' + str(n_repeats) + ' k-fold bagging repeats ...')\n return models_valid\n\n def train_multi_initial(self, X_train, y_train, X_test, y_test, models: List[AbstractModel], kfolds, n_repeats, hyperparameter_tune=True, feature_prune=False, stack_name='core', level=0, time_limit=None):\n stack_loc = self.models_level[stack_name]\n\n model_names_trained = []\n model_names_trained_hpo = []\n models_valid = models\n if kfolds == 0:\n models_valid = self.train_multi_fold(X_train, y_train, X_test, y_test, models_valid, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, stack_name=stack_name,\n kfolds=kfolds, level=level, time_limit=time_limit)\n else:\n k_fold_start = 0\n if hyperparameter_tune or feature_prune:\n time_start = time.time()\n models_valid = self.train_multi_fold(X_train, y_train, X_test, y_test, models_valid, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, stack_name=stack_name,\n kfolds=kfolds, k_fold_start=0, k_fold_end=1, n_repeats=n_repeats, n_repeat_start=0, level=level, time_limit=time_limit)\n k_fold_start = 1\n if time_limit is not None:\n time_limit = time_limit - (time.time() - time_start)\n\n models_valid = self.train_multi_fold(X_train, y_train, X_test, y_test, models_valid, hyperparameter_tune=False, feature_prune=False, stack_name=stack_name,\n kfolds=kfolds, k_fold_start=k_fold_start, k_fold_end=kfolds, n_repeats=n_repeats, n_repeat_start=0, level=level, time_limit=time_limit)\n\n if hyperparameter_tune:\n model_names_trained_hpo += models_valid\n else:\n model_names_trained += models_valid\n\n stack_loc[level] += model_names_trained_hpo # Update model list with (potentially empty) list of new models 
created during HPO\n model_names_trained += model_names_trained_hpo\n unique_names = []\n for item in stack_loc[level]:\n if item not in unique_names: unique_names.append(item)\n stack_loc[level] = unique_names # make unique and preserve order\n return model_names_trained\n\n # TODO: Ban KNN from being a Stacker model outside of aux. Will need to ensemble select on all stack layers ensemble selector to make it work\n # TODO: Robert dataset, LightGBM is super good but RF and KNN take all the time away from it on 1h despite being much worse\n # TODO: Add time_limit_per_model\n def train_multi_fold(self, X_train, y_train, X_test, y_test, models: List[AbstractModel], hyperparameter_tune=True, feature_prune=False, stack_name='core', kfolds=None, k_fold_start=0, k_fold_end=None, n_repeats=None, n_repeat_start=0, level=0, time_limit=None):\n models_valid = []\n time_start = time.time()\n for i, model in enumerate(models):\n if isinstance(model, str):\n model = self.load_model(model)\n elif self.low_memory:\n model = copy.deepcopy(model)\n # TODO: Only update scores when finished, only update model as part of final models if finished!\n if time_limit is None:\n time_left = None\n else:\n time_start_model = time.time()\n time_left = time_limit - (time_start_model - time_start)\n model_name_trained_lst = self.train_single_full(X_train, y_train, X_test, y_test, model, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, stack_name=stack_name,\n kfolds=kfolds, k_fold_start=k_fold_start, k_fold_end=k_fold_end,\n n_repeats=n_repeats, n_repeat_start=n_repeat_start, level=level, time_limit=time_left)\n\n if self.low_memory:\n del model\n models_valid += model_name_trained_lst\n\n return models_valid\n\n def train_multi(self, X_train, y_train, X_test, y_test, models: List[AbstractModel], hyperparameter_tune=True, feature_prune=False, stack_name='core', kfolds=None, n_repeats=None, n_repeat_start=0, level=0, time_limit=None):\n if kfolds is None:\n kfolds = self.kfolds\n if n_repeats is None:\n n_repeats = self.n_repeats\n if (kfolds == 0) and (n_repeats != 1):\n raise ValueError('n_repeats must be 1 when kfolds is 0, values: (%s, %s)' % (n_repeats, kfolds))\n if time_limit is None:\n n_repeats_initial = n_repeats\n else:\n n_repeats_initial = 1\n if n_repeat_start == 0:\n time_start = time.time()\n model_names_trained = self.train_multi_initial(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, models=models, kfolds=kfolds, n_repeats=n_repeats_initial, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune,\n stack_name=stack_name, level=level, time_limit=time_limit)\n n_repeat_start = n_repeats_initial\n if time_limit is not None:\n time_limit = time_limit - (time.time() - time_start)\n else:\n model_names_trained = models\n if (n_repeats > 1) and self.bagged_mode and (n_repeat_start < n_repeats):\n model_names_trained = self.train_multi_repeats(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, models=model_names_trained,\n kfolds=kfolds, n_repeats=n_repeats, n_repeat_start=n_repeat_start, stack_name=stack_name, level=level, time_limit=time_limit)\n return model_names_trained\n\n def train_multi_and_ensemble(self, X_train, y_train, X_test, y_test, models: List[AbstractModel], hyperparameter_tune=True, feature_prune=False):\n if self.save_data and not self.is_data_saved:\n self.save_X_train(X_train)\n self.save_y_train(y_train)\n if X_test is not None:\n self.save_X_val(X_test)\n if y_test is not None:\n self.save_y_val(y_test)\n self.is_data_saved 
= True\n\n self.num_rows_train = len(X_train)\n if X_test is not None:\n self.num_rows_train += len(X_test)\n self.num_cols_train = len(list(X_train.columns))\n self.time_train_start = time.time()\n self.train_multi_levels(X_train, y_train, X_test, y_test, models=models, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, level_start=0, level_end=self.stack_ensemble_levels)\n if len(self.get_model_names_all()) == 0:\n raise ValueError('AutoGluon did not successfully train any models')\n\n def train_multi_levels(self, X_train, y_train, X_test, y_test, models: List[AbstractModel], hyperparameter_tune=True, feature_prune=False, level_start=0, level_end=0):\n for level in range(max(0, level_start), level_end + 1):\n self.time_train_level_start = time.time()\n self.time_limit_per_level = (self.time_limit - (self.time_train_level_start - self.time_train_start)) / (level_end + 1 - level)\n if self.ignore_time_limit:\n time_limit_core = None\n time_limit_aux = None\n else:\n time_limit_core = self.time_limit_per_level\n time_limit_aux = max(self.time_limit_per_level * 0.1, min(self.time_limit, 360)) # Allows aux to go over time_limit, but only by a small amount\n if level == 0:\n self.stack_new_level(X=X_train, y=y_train, X_test=X_test, y_test=y_test, models=models, level=level, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, time_limit_core=time_limit_core, time_limit_aux=time_limit_aux)\n else:\n self.stack_new_level(X=X_train, y=y_train, X_test=X_test, y_test=y_test, level=level, time_limit_core=time_limit_core, time_limit_aux=time_limit_aux)\n\n self.save()\n\n def stack_new_level(self, X, y, X_test=None, y_test=None, level=0, models=None, hyperparameter_tune=False, feature_prune=False, time_limit_core=None, time_limit_aux=None):\n self.stack_new_level_core(X=X, y=y, X_test=X_test, y_test=y_test, models=models, level=level, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, time_limit=time_limit_core)\n if self.bagged_mode:\n self.stack_new_level_aux(X=X, y=y, level=level+1, time_limit=time_limit_aux)\n else:\n self.stack_new_level_aux(X=X_test, y=y_test, fit=False, level=level+1, time_limit=time_limit_aux)\n\n def stack_new_level_core(self, X, y, X_test=None, y_test=None, models=None, level=1, stack_name='core', kfolds=None, n_repeats=None, hyperparameter_tune=False, feature_prune=False, time_limit=None):\n use_orig_features = True\n if models is None:\n models = self.get_models(self.hyperparameters, level=level)\n if kfolds is None:\n kfolds = self.kfolds\n if n_repeats is None:\n n_repeats = self.n_repeats\n\n if self.bagged_mode:\n if level == 0:\n (base_model_names, base_model_paths, base_model_types) = ([], {}, {})\n elif level > 0:\n base_model_names, base_model_paths, base_model_types = self.get_models_info(model_names=self.models_level['core'][level - 1])\n if len(base_model_names) == 0:\n logger.log(20, 'No base models to train on, skipping stack level...')\n return\n else:\n raise AssertionError('Stack level cannot be negative! 
level = %s' % level)\n models = [\n StackerEnsembleModel(path=self.path, name=model.name + '_STACKER_l' + str(level), model_base=model, base_model_names=base_model_names,\n base_model_paths_dict=base_model_paths, base_model_types_dict=base_model_types, use_orig_features=use_orig_features,\n num_classes=self.num_classes, random_state=level)\n for model in models]\n X_train_init = self.get_inputs_to_stacker(X, level_start=0, level_end=level, fit=True)\n if X_test is not None:\n X_test = self.get_inputs_to_stacker(X_test, level_start=0, level_end=level, fit=False)\n\n return self.train_multi(X_train=X_train_init, y_train=y, X_test=X_test, y_test=y_test, models=models, hyperparameter_tune=hyperparameter_tune, feature_prune=feature_prune, level=level, stack_name=stack_name, kfolds=kfolds, n_repeats=n_repeats, time_limit=time_limit)\n\n def stack_new_level_aux(self, X, y, level, fit=True, time_limit=None):\n stack_name = 'aux1'\n X_train_stack_preds = self.get_inputs_to_stacker(X, level_start=0, level_end=level, fit=fit)\n self.generate_weighted_ensemble(X=X_train_stack_preds, y=y, level=level, kfolds=0, n_repeats=1, stack_name=stack_name, time_limit=time_limit)\n\n def generate_weighted_ensemble(self, X, y, level, kfolds=0, n_repeats=1, stack_name=None, hyperparameters=None, time_limit=None, name_suffix=''):\n if len(self.models_level['core'][level-1]) == 0:\n logger.log(20, 'No base models to train on, skipping weighted ensemble...')\n return\n weighted_ensemble_model = WeightedEnsembleModel(path=self.path, name='weighted_ensemble_' + name_suffix + 'k' + str(kfolds) + '_l' + str(level), base_model_names=self.models_level['core'][level-1],\n base_model_paths_dict=self.model_paths, base_model_types_dict=self.model_types, base_model_types_inner_dict=self.model_types_inner, base_model_performances_dict=self.model_performance, hyperparameters=hyperparameters,\n objective_func=self.objective_func, num_classes=self.num_classes, random_state=level)\n\n self.train_multi(X_train=X, y_train=y, X_test=None, y_test=None, models=[weighted_ensemble_model], kfolds=kfolds, n_repeats=n_repeats, hyperparameter_tune=False, feature_prune=False, stack_name=stack_name, level=level, time_limit=time_limit)\n if weighted_ensemble_model.name in self.get_model_names_all():\n if self.model_best is None:\n self.model_best = weighted_ensemble_model.name\n else:\n best_score = self.model_performance[self.model_best]\n cur_score = self.model_performance[weighted_ensemble_model.name]\n if cur_score > best_score:\n # new best model\n self.model_best = weighted_ensemble_model.name\n return weighted_ensemble_model.name\n\n def generate_stack_log_reg(self, X, y, level, kfolds=0, stack_name=None):\n base_model_names, base_model_paths, base_model_types = self.get_models_info(model_names=self.models_level['core'][level-1])\n stacker_model_lr = get_preset_stacker_model(path=self.path, problem_type=self.problem_type, objective_func=self.objective_func, num_classes=self.num_classes)\n name_new = stacker_model_lr.name + '_STACKER_k' + str(kfolds) + '_l' + str(level)\n\n stacker_model_lr = StackerEnsembleModel(path=self.path, name=name_new, model_base=stacker_model_lr, base_model_names=base_model_names, base_model_paths_dict=base_model_paths, base_model_types_dict=base_model_types,\n use_orig_features=False,\n num_classes=self.num_classes, random_state=level)\n\n return self.train_multi(X_train=X, y_train=y, X_test=None, y_test=None, models=[stacker_model_lr], hyperparameter_tune=False, feature_prune=False, stack_name=stack_name, 
kfolds=kfolds, level=level)\n\n def predict(self, X, model=None):\n if model is not None:\n return self.predict_model(X, model)\n elif self.model_best is not None:\n return self.predict_model(X, self.model_best)\n elif self.model_best_core is not None:\n return self.predict_model(X, self.model_best_core)\n else:\n raise Exception('Trainer has no fit models to predict with.')\n\n def predict_proba(self, X, model=None):\n if model is not None:\n return self.predict_proba_model(X, model)\n elif self.model_best is not None:\n return self.predict_proba_model(X, self.model_best)\n elif self.model_best_core is not None:\n return self.predict_proba_model(X, self.model_best_core)\n else:\n raise Exception('Trainer has no fit models to predict with.')\n\n def predict_model(self, X, model, level_start=0):\n if isinstance(model, str):\n model = self.load_model(model)\n X = self.get_inputs_to_model(model=model, X=X, level_start=level_start, fit=False)\n return model.predict(X=X, preprocess=False)\n\n def predict_proba_model(self, X, model, level_start=0):\n if isinstance(model, str):\n model = self.load_model(model)\n X = self.get_inputs_to_model(model=model, X=X, level_start=level_start, fit=False)\n return model.predict_proba(X=X, preprocess=False)\n\n def get_inputs_to_model(self, model, X, level_start, fit=False, preprocess=True):\n if isinstance(model, str):\n model = self.load_model(model)\n model_level = self.get_model_level(model.name)\n if model_level >= 1:\n X = self.get_inputs_to_stacker(X=X, level_start=level_start, level_end=model_level-1, fit=fit)\n X = model.preprocess(X, fit=fit, preprocess=preprocess)\n else:\n if preprocess:\n X = model.preprocess(X)\n return X\n\n def score(self, X, y, model=None):\n if self.objective_func_expects_y_pred:\n y_pred_ensemble = self.predict(X=X, model=model)\n return self.objective_func(y, y_pred_ensemble)\n else:\n y_pred_proba_ensemble = self.predict_proba(X=X, model=model)\n return self.objective_func(y, y_pred_proba_ensemble)\n\n def score_with_y_pred_proba(self, y, y_pred_proba):\n if self.objective_func_expects_y_pred:\n y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=self.problem_type)\n return self.objective_func(y, y_pred)\n else:\n return self.objective_func(y, y_pred_proba)\n\n def autotune(self, X_train, X_holdout, y_train, y_holdout, model_base: AbstractModel):\n model_base.feature_prune(X_train, X_holdout, y_train, y_holdout)\n\n def pred_proba_predictions(self, models, X_test):\n preds = []\n for model in models:\n if isinstance(model, str):\n model = self.load_model(model)\n model_pred = model.predict_proba(X_test)\n preds.append(model_pred)\n return preds\n\n def get_inputs_to_stacker(self, X, level_start, level_end, y_pred_probas=None, fit=False):\n if level_start > level_end:\n raise AssertionError('level_start cannot be greater than level end:' + str(level_start) + ', ' + str(level_end))\n if (level_start == 0) and (level_end == 0):\n return X\n if fit:\n if level_start >= 1:\n dummy_stacker_start = self._get_dummy_stacker(level=level_start, use_orig_features=True)\n cols_to_drop = dummy_stacker_start.stack_columns\n X = X.drop(cols_to_drop, axis=1)\n dummy_stacker = self._get_dummy_stacker(level=level_end, use_orig_features=True)\n X = dummy_stacker.preprocess(X=X, preprocess=False, fit=True, compute_base_preds=True)\n elif y_pred_probas is not None:\n dummy_stacker = self._get_dummy_stacker(level=level_end, use_orig_features=True)\n X_stacker = dummy_stacker.pred_probas_to_df(pred_proba=y_pred_probas)\n if 
dummy_stacker.use_orig_features:\n if level_start >= 1:\n dummy_stacker_start = self._get_dummy_stacker(level=level_start, use_orig_features=True)\n cols_to_drop = dummy_stacker_start.stack_columns\n X = X.drop(cols_to_drop, axis=1)\n X = pd.concat([X_stacker, X], axis=1)\n else:\n X = X_stacker\n else:\n dummy_stackers = {}\n for level in range(level_start, level_end+1):\n if level >= 1:\n dummy_stackers[level] = self._get_dummy_stacker(level=level, use_orig_features=True)\n for level in range(level_start, level_end):\n if level >= 1:\n cols_to_drop = dummy_stackers[level].stack_columns\n else:\n cols_to_drop = []\n X = dummy_stackers[level+1].preprocess(X=X, preprocess=False, fit=False, compute_base_preds=True)\n if len(cols_to_drop) > 0:\n X = X.drop(cols_to_drop, axis=1)\n return X\n\n # TODO: add compress support for non-bagged models\n def compress(self, X=None, y=None, models=None):\n if X is None:\n X = self.load_X_train()\n if y is None:\n y = self.load_y_train()\n if models is None:\n models = self.get_model_names_all()\n\n models_compressed = {}\n model_levels = defaultdict(dd_list)\n for model_name in models:\n model = self.load_model(model_name)\n if isinstance(model, WeightedEnsembleModel):\n continue\n model_level = self.get_model_level(model_name)\n model_levels['compressed'][model_level] += [model_name]\n model_compressed = model.convert_to_compressed_template()\n models_compressed[model_name] = model_compressed\n levels = sorted(model_levels['compressed'].keys())\n models_trained_full = []\n for level in levels:\n models_level = model_levels['compressed'][level]\n models_level = [models_compressed[model_name] for model_name in models_level]\n models_trained = self.stack_new_level_core(X=X, y=y, models=models_level, level=level, stack_name='compressed', hyperparameter_tune=False, feature_prune=False, kfolds=0, n_repeats=1)\n models_trained_full += models_trained\n return models_trained_full\n\n def distill(self, X=None, y=None):\n if X is None:\n X = self.load_X_train()\n if y is None:\n y = self.load_y_train()\n\n model_best = self.load_model(self.model_best)\n if self.problem_type == MULTICLASS:\n raise NotImplementedError\n if not self.bagged_mode:\n raise NotImplementedError\n models_distill = get_preset_models_distillation(path=self.path, problem_type=self.problem_type, objective_func=self.objective_func, stopping_metric=self.stopping_metric, num_classes=self.num_classes, hyperparameters=self.hyperparameters)\n y_distill = pd.Series(model_best.oof_pred_proba)\n\n # TODO: Do stratified for binary/multiclass, folds are not aligned!\n models_trained = self.stack_new_level_core(X=X, y=y_distill, models=models_distill, level=0, stack_name='distilled', hyperparameter_tune=False, feature_prune=False)\n self.compress(X=X, y=y_distill, models=models_trained)\n\n self.save()\n\n def save_model(self, model):\n if self.low_memory:\n model.save()\n else:\n self.models[model.name] = model\n\n def save(self):\n save_pkl.save(path=self.path + self.trainer_file_name, object=self)\n\n def load_models_into_memory(self, model_names=None):\n if model_names is None:\n model_names = self.get_model_names_all()\n models = []\n for model_name in model_names:\n model = self.load_model(model_name)\n self.models[model.name] = model\n models.append(model)\n\n for model in models:\n if isinstance(model, StackerEnsembleModel):\n for base_model_name in model.base_model_names:\n if base_model_name not in model.base_models_dict.keys():\n if base_model_name in self.models.keys():\n 
model.base_models_dict[base_model_name] = self.models[base_model_name]\n if isinstance(model, BaggedEnsembleModel):\n for fold, fold_model in enumerate(model.models):\n if isinstance(fold_model, str):\n model.models[fold] = model.load_child(fold_model)\n\n def load_model(self, model_name: str) -> AbstractModel:\n if model_name in self.models.keys():\n return self.models[model_name]\n else:\n return self.model_types[model_name].load(path=self.model_paths[model_name], reset_paths=self.reset_paths)\n\n def _get_dummy_stacker(self, level, use_orig_features=True):\n model_names = self.models_level['core'][level-1]\n base_models_dict = {}\n for model_name in model_names:\n if model_name in self.models.keys():\n base_models_dict[model_name] = self.models[model_name]\n dummy_stacker = StackerEnsembleModel(\n path='', name='',\n model_base=AbstractModel(path='', name='', problem_type=self.problem_type, objective_func=self.objective_func),\n base_model_names=model_names, base_models_dict=base_models_dict, base_model_paths_dict=self.model_paths,\n base_model_types_dict=self.model_types, use_orig_features=use_orig_features, num_classes=self.num_classes, random_state=level\n )\n return dummy_stacker\n\n def get_models_info(self, model_names):\n model_names = copy.deepcopy(model_names)\n model_paths = {model_name: self.model_paths[model_name] for model_name in model_names}\n model_types = {model_name: self.model_types[model_name] for model_name in model_names}\n return model_names, model_paths, model_types\n\n def leaderboard(self):\n model_names = self.get_model_names_all()\n score_val = []\n fit_time = []\n pred_time_val = []\n stack_level = []\n for model_name in model_names:\n score_val.append(self.model_performance.get(model_name))\n fit_time.append(self.model_fit_times.get(model_name))\n pred_time_val.append(self.model_pred_times.get(model_name))\n stack_level.append(self.get_model_level(model_name))\n df = pd.DataFrame(data={\n 'model': model_names,\n 'score_val': score_val,\n 'fit_time': fit_time,\n 'pred_time_val': pred_time_val,\n 'stack_level': stack_level,\n })\n df_sorted = df.sort_values(by=['score_val', 'model'], ascending=False)\n return df_sorted\n\n def info(self):\n model_count = len(self.get_model_names_all())\n if self.model_best is not None:\n best_model = self.model_best\n else:\n best_model = self.model_best_core\n best_model_score_val = self.model_performance.get(best_model)\n # fit_time = None\n num_bagging_folds = self.kfolds\n max_stack_level = self.get_max_level('core')\n best_model_stack_level = self.get_model_level(best_model)\n problem_type = self.problem_type\n objective_func = self.objective_func.name\n time_train_start = self.time_train_start\n num_rows_train = self.num_rows_train\n num_cols_train = self.num_cols_train\n num_classes = self.num_classes\n # TODO:\n # Disk size of models\n # Raw feature count\n # HPO time\n # Bag time\n # Feature prune time\n # Exception count / models failed count\n # True model count (models * kfold)\n # AutoGluon version fit on\n # Max memory usage\n # CPU count used / GPU count used\n\n info = {\n 'model_count': model_count,\n 'best_model': best_model,\n 'best_model_score_val': best_model_score_val,\n 'num_bagging_folds': num_bagging_folds,\n 'max_stack_level': max_stack_level,\n 'best_model_stack_level': best_model_stack_level,\n 'problem_type': problem_type,\n 'objective_func': objective_func,\n 'time_train_start': time_train_start,\n 'num_rows_train': num_rows_train,\n 'num_cols_train': num_cols_train,\n 'num_classes': 
num_classes,\n }\n\n return info\n\n @classmethod\n def load(cls, path, reset_paths=False):\n load_path = path + cls.trainer_file_name\n if not reset_paths:\n return load_pkl.load(path=load_path)\n else:\n obj = load_pkl.load(path=load_path)\n obj.set_contexts(path)\n obj.reset_paths = reset_paths\n return obj\n"
] | [
[
"numpy.isnan",
"pandas.concat",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
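A note on the trainer code in the record above: models are tracked in a two-level registry keyed by stack name ('core', 'aux1', ...) and stack level, via models_level = defaultdict(dd_list). A minimal sketch of that bookkeeping pattern, assuming dd_list is a factory returning defaultdict(list) (its definition is not shown in this record, but the usage implies it) and using hypothetical model names:

    from collections import defaultdict

    def dd_list():
        # assumed helper: missing keys yield empty lists, matching how
        # models_level[stack_name][level] is appended to in the trainer
        return defaultdict(list)

    models_level = defaultdict(dd_list)

    # register a base model, a level-1 stacker, and a level-1 aux ensemble
    models_level['core'][0].append('LightGBM_l0')            # hypothetical names
    models_level['core'][1].append('LightGBM_STACKER_l1')
    models_level['aux1'][1].append('weighted_ensemble_k0_l1')

    # walk all models level by level, mirroring get_model_names_all()
    for stack_name in models_level:
        for level in sorted(models_level[stack_name]):
            for name in models_level[stack_name][level]:
                print(stack_name, level, name)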
zangjinxia/remote-sensing | [
"942fe914e93ab25e0074e061677e0f5294afdc24"
] | [
"resizeData_final_github.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author zangjinx\r\n@date 2020-12-3\r\n@brief 利用shp裁剪影像,影像需具有坐标信息\r\n\"\"\"\r\n\r\nimport gdal\r\nfrom osgeo import gdal_array,ogr\r\nimport gdalnumeric\r\nfrom PIL import Image, ImageDraw\r\nimport os\r\nimport sys\r\nimport numpy as np\r\n\r\ngdal.UseExceptions()\r\n\r\nclass Dataset:\r\n def read_img(self, filename):\r\n dataset = gdal.Open(filename)\r\n\r\n width = dataset.RasterXSize\r\n height = dataset.RasterYSize\r\n band = dataset.RasterCount\r\n im_data = dataset.ReadAsArray(0, 0, width, height)\r\n\r\n geotrans = dataset.GetGeoTransform()\r\n proj = dataset.GetProjection()\r\n # data = np.zeros([width, height, band])\r\n\r\n return im_data, proj, geotrans,band,width,height\r\n\r\n def write_tiff(self, filename, proj, geotrans, data,minx,maxy):\r\n # gdal数据类型包括\r\n # gdal.GDT_Byte,\r\n # gdal .GDT_UInt16, gdal.GDT_Int16, gdal.GDT_UInt32, gdal.GDT_Int32,\r\n # gdal.GDT_Float32, gdal.GDT_Float64\r\n # 判断栅格数据的数据类型\r\n if 'int8' in data.dtype.name:\r\n datatype = gdal.GDT_Byte\r\n elif 'int16' in data.dtype.name:\r\n datatype = gdal.GDT_UInt16\r\n else:\r\n datatype = gdal.GDT_Float32\r\n\r\n # 判读数组维数\r\n if len(data.shape) == 3:\r\n bands, height, width = data.shape\r\n else:\r\n bands = 1\r\n height, width = data.shape\r\n # 创建文件\r\n driver = gdal.GetDriverByName(\"GTiff\")\r\n dataset = driver.Create(filename, width, height, bands, datatype)\r\n geotrans_update = (minx, geotrans[1],geotrans[2],maxy,geotrans[4],geotrans[5])\r\n dataset.SetGeoTransform(geotrans_update)\r\n dataset.SetProjection(proj)\r\n\r\n if bands == 1:\r\n dataset.GetRasterBand(1).WriteArray(data)\r\n else:\r\n for i in range(bands):\r\n dataset.GetRasterBand(i + 1).WriteArray(data[i])\r\n del dataset\r\n\r\n# This function will convert the rasterized clipper shapefile\r\n# to a mask for use within GDAL.\r\n\r\ndef world2Pixel(geoMatrix, x, y):\r\n \"\"\"\r\n Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate\r\n the pixel location of a geospatial coordinate\r\n \"\"\"\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xDist = geoMatrix[1]\r\n pixel = int((x - ulX) / xDist)\r\n line = int((ulY - y) / xDist)\r\n return (pixel, line)\r\n\r\n\r\ndef main( shapefile_path, raster_path, outRaster_path):\r\n # 读取栅格数据\r\n dataset = Dataset()\r\n srcArray,proj,geo,band,width,height = dataset.read_img(raster_path)\r\n print(geo)\r\n if band == 1:\r\n clip = np.array(srcArray,dtype = float)\r\n else:\r\n clip = np.empty((band,height,width))\r\n for i in range(band):\r\n clip[i] = np.array(srcArray[i],dtype = float)\r\n\r\n\r\n # 打开shp文件\r\n shapef = ogr.Open(shapefile_path)\r\n lyr = shapef.GetLayer( os.path.split( os.path.splitext( shapefile_path )[0] )[1] )\r\n\r\n poly = lyr.GetNextFeature()\r\n\r\n # 将范围转为图像像素坐标\r\n minX, maxX, minY, maxY = lyr.GetExtent()\r\n ulX, ulY = world2Pixel(geo, minX, maxY)\r\n lrX, lrY = world2Pixel(geo, maxX, minY)\r\n\r\n # 计算新影像的尺寸大小\r\n pxWidth = int(lrX - ulX)\r\n pxHeight = int(lrY - ulY)\r\n if band == 1:\r\n clip = srcArray[ulY:lrY, ulX:lrX]\r\n else:\r\n\r\n clip = clip[:,ulY:lrY, ulX:lrX]\r\n\r\n\r\n\r\n # 创建一个新矩阵\r\n geoTrans = list(geo)\r\n geoTrans[0] = minX\r\n geoTrans[3] = maxY\r\n\r\n\r\n dataset.write_tiff(outRaster_path, proj, geo, clip,minX,maxY)\r\n\r\nif __name__ == '__main__':\r\n\r\n # if len(sys.argv) != 4:\r\n # # distutils.log.error(\"not enougth input parameters\")\r\n # sys.exit(-1)\r\n # shapefile_path = sys.argv[1]\r\n # raster_path = sys.argv[2]\r\n # outRaster_path = sys.argv[3]\r\n shapefile_path = 
'I:/data/GF_radiance/range.shp'\r\n raster_path = 'I:/data/GF_radiance/Vege_test/GF1caijian.tif'\r\n outRaster_path = 'D:/AAdata/resize8.tif'\r\n main( shapefile_path, raster_path, outRaster_path )"
] | [
[
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
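A caveat on the clipping script in the record above: its world2Pixel divides the y offset by xDist (geotransform element 1), which silently assumes square pixels. A sketch of the same arithmetic written against GDAL's documented geotransform layout; the coordinates and resolutions below are made up for illustration:

    def world_to_pixel(geo, x, y):
        # geo follows gdal.GetGeoTransform():
        # (ulX, pixel_width, row_rotation, ulY, col_rotation, pixel_height)
        # pixel_height (geo[5]) is normally negative for north-up rasters
        ulx, xres, _, uly, _, yres = geo
        col = int((x - ulx) / xres)
        row = int((uly - y) / abs(yres))
        return col, row

    # a 0.5 m/px north-up raster whose upper-left corner is at (1000, 2000)
    geo = (1000.0, 0.5, 0.0, 2000.0, 0.0, -0.5)
    print(world_to_pixel(geo, 1010.0, 1990.0))  # -> (20, 20)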
olivertren/check-worthy | [
"77ecc3cc92c800b3dd9839a7a7d6ccd79eba1214"
] | [
"src/models/keras_bi_lstm.py"
] | [
"import sys\nsys.path.append('/usr/users/oliverren/meng/check-worthy')\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Embedding, LSTM, Bidirectional, Dropout, Dense\nfrom keras import Sequential\nfrom src.data import debates\nimport numpy as np\n\nMAX_NUM_WORDS = 1000\n# data_set[i] is the ith crossvalidation split, data_set[i][0] says which debate is the test debate\n# data_set[i][1] are the sentences in the test set\n# data_set[i][2] are the sentences in the training set\ndata_sets = debates.get_for_crossvalidation()\n\ntexts = [sentence.text for sentence in data_sets[0][1]]\ntexts.extend([sentence.text for sentence in data_sets[0][2]])\nMAX_SEQUENCE_LENGTH = max([len(sentence.split()) for sentence in texts])\n# print(MAX_SEQUENCE_LENGTH)\n\n\n# the embedding is already pretrained, so whenever we go to a different dataset, we should reset the embedding layer\n# so that the embedding layer uses the words in the vocab of the dataset being tested\ntokenizer = Tokenizer(num_words= MAX_NUM_WORDS)\ntokenizer.fit_on_texts(texts)\nsequences = tokenizer.texts_to_sequences(texts)\n# print(sequences)\n# print(texts[0])\n# print(tokenizer.word_index)\nword_index = tokenizer.word_index\n# print(word_index)\n\ndata = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\n# Create Embedding layer\nembeddings_index = {}\nf = open('/usr/users/oliverren/meng/check-worthy/data/glove/glove.6B.50d.txt')\ncount = 0\nfor line in f:\n values = line.split()\n if count == 0:\n # print(values)\n count += 1\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\n\nEMBEDDING_DIM = 50\n# + 1 because indexes are positive integers\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not in embedding index will be all-zeros\n embedding_matrix[i] = embedding_vector\n\nembedding_layer = Embedding(len(word_index) + 1,\n EMBEDDING_DIM,\n weights = [embedding_matrix],\n input_length = MAX_SEQUENCE_LENGTH,\n trainable = False)\n\n\n# bi-directional\nLSTM_OUTPOUT_DIM = 200\nHIDDEN_LAYER_DIM = 200\nBATCH_SIZE = 32\n\nx_train = [sentence.text for sentence in data_sets[0][2]]\ny_train = [sentence.label for sentence in data_sets[0][2]]\nx_test = [sentence.text for sentence in data_sets[0][1]]\ny_test = [sentence.label for sentence in data_sets[0][1]]\n\n\nx_train = tokenizer.texts_to_sequences(x_train)\nx_train = pad_sequences(x_train, maxlen=MAX_SEQUENCE_LENGTH)\nx_test = tokenizer.texts_to_sequences(x_test)\nx_test = pad_sequences(x_test, maxlen=MAX_SEQUENCE_LENGTH)\n\n\nmodel = Sequential()\nmodel.add(embedding_layer)\nmodel.add(Bidirectional(LSTM(LSTM_OUTPOUT_DIM)))\nmodel.add(Dense(1,activation='sigmoid'))\nmodel.compile(loss = 'binary_crossentropy', optimizer='adam',metrics = ['accuracy'])\nprint(model.summary())\n\nmodel.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=200,\nvalidation_data=[x_test, y_test])\n\nmodel_2 = Sequential()\nmodel_2.add(embedding_layer)\nmodel_2.add(Bidirectional(LSTM(LSTM_OUTPOUT_DIM)))\nmodel_2.add(Dense(HIDDEN_LAYER_DIM*4,activation='relu'))\nmodel_2.add(Dropout(0.5))\nmodel_2.add(Dense(HIDDEN_LAYER_DIM,activation='relu'))\nmodel_2.add(Dropout(0.5))\nmodel_2.add(Dense(1,activation='sigmoid'))\nmodel_2.compile(loss = 'binary_crossentropy', optimizer='adam',metrics = 
['accuracy'])\nprint(model_2.summary())\n\nmodel_2.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=200,\nvalidation_data=[x_test, y_test])\n\n\nfrom sklearn.metrics import (average_precision_score, precision_score,\n recall_score, roc_auc_score)\n\ndef f1(y_true, y_pred):\n precision = precision_score(y_true, y_pred)\n recall = recall_score(y_true, y_pred)\n return 2 * ((precision * recall) / (precision + recall))\n\ndef accuracy(y_true, y_pred):\n num_correct = len([1 for true, pred in zip(y_true, y_pred) if true == pred])\n return num_correct/len(y_true)\n\nprint('model 1')\nprint('f1')\nprint(f1(y_test, model.predict_classes(x_test).reshape(-1)))\nprint('accuracy')\nprint(accuracy(y_test, model.predict_classes(x_test).reshape(-1)))\n\n\nprint('model 2')\nprint('f1')\nprint(f1(y_test, model_2.predict_classes(x_test).reshape(-1)))\nprint('accuracy')\nprint(accuracy(y_test, model_2.predict_classes(x_test).reshape(-1)))\n"
] | [
[
"numpy.asarray",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
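One caveat on the record above: its hand-rolled f1 divides by precision + recall, so it raises ZeroDivisionError whenever a model predicts no positives and hits none. A hedged alternative is scikit-learn's own scorer, which computes the same harmonic mean (the zero_division argument assumes scikit-learn >= 0.22):

    import numpy as np
    from sklearn.metrics import f1_score

    y_true = np.array([0, 1, 1, 0, 1])
    y_pred = np.array([0, 1, 0, 0, 1])

    # same harmonic mean of precision and recall as f1() in the code above,
    # but returns 0.0 instead of raising when precision + recall == 0
    print(f1_score(y_true, y_pred, zero_division=0))  # -> 0.8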
ChadFulton/scipy | [
"6a7327e8bb8248b2ea165180bc602edf1ab33dda"
] | [
"scipy/spatial/tests/test_qhull.py"
] | [
"from __future__ import division, print_function, absolute_import\n\nimport os\nimport copy\n\nimport numpy as np\nfrom numpy.testing import (assert_equal, assert_almost_equal, run_module_suite,\n assert_, dec, assert_allclose, assert_array_equal,\n assert_raises)\nfrom scipy.lib.six import xrange\n\nimport scipy.spatial.qhull as qhull\nfrom scipy.spatial import cKDTree as KDTree\n\n\ndef sorted_tuple(x):\n return tuple(sorted(x))\n\n\ndef sorted_unique_tuple(x):\n return tuple(np.unique(x))\n\n\ndef assert_unordered_tuple_list_equal(a, b, tpl=tuple):\n if isinstance(a, np.ndarray):\n a = a.tolist()\n if isinstance(b, np.ndarray):\n b = b.tolist()\n a = list(map(tpl, a))\n a.sort()\n b = list(map(tpl, b))\n b.sort()\n assert_equal(a, b)\n\nnp.random.seed(1234)\n\npoints = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]\n\npathological_data_1 = np.array([\n [-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],\n [-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],\n [-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],\n [-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],\n [-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],\n [-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],\n [-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],\n [-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],\n [-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],\n [0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],\n [0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],\n [0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],\n [0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],\n [1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],\n [1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],\n [1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],\n [2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],\n [2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],\n [3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],\n [3.14,1.57], [3.14,2.36], [3.14,3.14],\n])\n\npathological_data_2 = np.array([\n [-1, -1], [-1, 0], [-1, 1],\n [0, -1], [0, 0], [0, 1],\n [1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],\n])\n\nbug_2850_chunks = [np.random.rand(10, 2),\n np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners\n ]\n\n# same with some additional chunks\nbug_2850_chunks_2 = (bug_2850_chunks +\n [np.random.rand(10, 2),\n 0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])\n\nDATASETS = {\n 'some-points': np.asarray(points),\n 'random-2d': np.random.rand(30, 2),\n 'random-3d': np.random.rand(30, 3),\n 'random-4d': np.random.rand(30, 4),\n 'random-5d': np.random.rand(30, 5),\n 'random-6d': np.random.rand(10, 6),\n 'random-7d': np.random.rand(10, 7),\n 'random-8d': np.random.rand(10, 8),\n 'pathological-1': pathological_data_1,\n 'pathological-2': pathological_data_2\n}\n\nINCREMENTAL_DATASETS = {\n 'bug-2850': (bug_2850_chunks, None),\n 'bug-2850-2': (bug_2850_chunks_2, None),\n}\n\n\ndef _add_inc_data(name, chunksize):\n \"\"\"\n Generate incremental datasets from basic data sets\n \"\"\"\n points = DATASETS[name]\n ndim = points.shape[1]\n\n opts = None\n nmin = ndim + 2\n\n if name == 'some-points':\n # since Qz is not allowed, use QJ\n opts = 'QJ Pp'\n elif name == 'pathological-1':\n # include enough points so that we get different x-coordinates\n nmin = 12\n\n chunks = [points[:nmin]]\n for j in xrange(nmin, len(points), chunksize):\n chunks.append(points[j:j+chunksize])\n\n new_name = \"%s-chunk-%d\" % (name, chunksize)\n assert new_name not in INCREMENTAL_DATASETS\n 
INCREMENTAL_DATASETS[new_name] = (chunks, opts)\n\nfor name in DATASETS:\n for chunksize in 1, 4, 16:\n _add_inc_data(name, chunksize)\n\n\nclass Test_Qhull(object):\n def test_swapping(self):\n # Check that Qhull state swapping works\n\n x = qhull._Qhull(b'v',\n np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),\n b'Qz')\n xd = copy.deepcopy(x.get_voronoi_diagram())\n\n y = qhull._Qhull(b'v',\n np.array([[0,0],[0,1],[1,0],[1,2.]]),\n b'Qz')\n yd = copy.deepcopy(y.get_voronoi_diagram())\n\n xd2 = copy.deepcopy(x.get_voronoi_diagram())\n x.close()\n yd2 = copy.deepcopy(y.get_voronoi_diagram())\n y.close()\n\n assert_raises(RuntimeError, x.get_voronoi_diagram)\n assert_raises(RuntimeError, y.get_voronoi_diagram)\n\n assert_allclose(xd[0], xd2[0])\n assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)\n assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)\n assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)\n assert_array_equal(xd[4], xd2[4])\n\n assert_allclose(yd[0], yd2[0])\n assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)\n assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)\n assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)\n assert_array_equal(yd[4], yd2[4])\n\n x.close()\n assert_raises(RuntimeError, x.get_voronoi_diagram)\n y.close()\n assert_raises(RuntimeError, y.get_voronoi_diagram)\n\n\nclass TestUtilities(object):\n \"\"\"\n Check that utility functions work.\n\n \"\"\"\n\n def test_find_simplex(self):\n # Simple check that simplex finding works\n points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)\n tri = qhull.Delaunay(points)\n\n # +---+\n # |\\ 0|\n # | \\ |\n # |1 \\|\n # +---+\n\n assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])\n\n for p in [(0.25, 0.25, 1),\n (0.75, 0.75, 0),\n (0.3, 0.2, 1)]:\n i = tri.find_simplex(p[:2])\n assert_equal(i, p[2], err_msg='%r' % (p,))\n j = qhull.tsearch(tri, p[:2])\n assert_equal(i, j)\n\n def test_plane_distance(self):\n # Compare plane distance from hyperplane equations obtained from Qhull\n # to manually computed plane equations\n x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),\n (0.99440079, 0.45182168)], dtype=np.double)\n p = np.array([0.99966555, 0.15685619], dtype=np.double)\n\n tri = qhull.Delaunay(x)\n\n z = tri.lift_points(x)\n pz = tri.lift_points(p)\n\n dist = tri.plane_distance(p)\n\n for j, v in enumerate(tri.vertices):\n x1 = z[v[0]]\n x2 = z[v[1]]\n x3 = z[v[2]]\n\n n = np.cross(x1 - x3, x2 - x3)\n n /= np.sqrt(np.dot(n, n))\n n *= -np.sign(n[2])\n\n d = np.dot(n, pz - x3)\n\n assert_almost_equal(dist[j], d)\n\n def test_convex_hull(self):\n # Simple check that the convex hull seems to works\n points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)\n tri = qhull.Delaunay(points)\n\n # +---+\n # |\\ 0|\n # | \\ |\n # |1 \\|\n # +---+\n\n assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])\n\n def _check_barycentric_transforms(self, tri, err_msg=\"\",\n unit_cube=False,\n unit_cube_tol=0):\n \"\"\"Check that a triangulation has reasonable barycentric transforms\"\"\"\n vertices = tri.points[tri.vertices]\n sc = 1/(tri.ndim + 1.0)\n centroids = vertices.sum(axis=1) * sc\n\n # Either: (i) the simplex has a `nan` barycentric transform,\n # or, (ii) the centroid is in the simplex\n\n def barycentric_transform(tr, x):\n ndim = tr.shape[1]\n r = tr[:,-1,:]\n Tinv = tr[:,:-1,:]\n return np.einsum('ijk,ik->ij', Tinv, x - r)\n\n eps = np.finfo(float).eps\n\n c = 
barycentric_transform(tri.transform, centroids)\n olderr = np.seterr(invalid=\"ignore\")\n try:\n ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)\n finally:\n np.seterr(**olderr)\n\n assert_(ok.all(), \"%s %s\" % (err_msg, np.where(~ok)))\n\n # Invalid simplices must be (nearly) zero volume\n q = vertices[:,:-1,:] - vertices[:,-1,None,:]\n volume = np.array([np.linalg.det(q[k,:,:])\n for k in range(tri.nsimplex)])\n ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))\n assert_(ok.all(), \"%s %s\" % (err_msg, np.where(~ok)))\n\n # Also, find_simplex for the centroid should end up in some\n # simplex for the non-degenerate cases\n j = tri.find_simplex(centroids)\n ok = (j != -1) | np.isnan(tri.transform[:,0,0])\n assert_(ok.all(), \"%s %s\" % (err_msg, np.where(~ok)))\n\n if unit_cube:\n # If in unit cube, no interior point should be marked out of hull\n at_boundary = (centroids <= unit_cube_tol).any(axis=1)\n at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)\n\n ok = (j != -1) | at_boundary\n assert_(ok.all(), \"%s %s\" % (err_msg, np.where(~ok)))\n\n def test_degenerate_barycentric_transforms(self):\n # The triangulation should not produce invalid barycentric\n # transforms that stump the simplex finding\n data = np.load(os.path.join(os.path.dirname(__file__), 'data',\n 'degenerate_pointset.npz'))\n points = data['c']\n data.close()\n\n tri = qhull.Delaunay(points)\n\n # Check that there are not too many invalid simplices\n bad_count = np.isnan(tri.transform[:,0,0]).sum()\n assert_(bad_count < 20, bad_count)\n\n # Check the transforms\n self._check_barycentric_transforms(tri)\n\n @dec.slow\n def test_more_barycentric_transforms(self):\n # Triangulate some \"nasty\" grids\n\n eps = np.finfo(float).eps\n\n npoints = {2: 70, 3: 11, 4: 5, 5: 3}\n\n for ndim in xrange(2, 6):\n # Generate an uniform grid in n-d unit cube\n x = np.linspace(0, 1, npoints[ndim])\n grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T\n\n err_msg = \"ndim=%d\" % ndim\n\n # Check using regular grid\n tri = qhull.Delaunay(grid)\n self._check_barycentric_transforms(tri, err_msg=err_msg,\n unit_cube=True)\n\n # Check with eps-perturbations\n np.random.seed(1234)\n m = (np.random.rand(grid.shape[0]) < 0.2)\n grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)\n\n tri = qhull.Delaunay(grid)\n self._check_barycentric_transforms(tri, err_msg=err_msg,\n unit_cube=True,\n unit_cube_tol=2*eps)\n\n # Check with duplicated data\n tri = qhull.Delaunay(np.r_[grid, grid])\n self._check_barycentric_transforms(tri, err_msg=err_msg,\n unit_cube=True,\n unit_cube_tol=2*eps)\n\n # Check with larger perturbations\n np.random.seed(4321)\n m = (np.random.rand(grid.shape[0]) < 0.2)\n grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)\n\n tri = qhull.Delaunay(grid)\n self._check_barycentric_transforms(tri, err_msg=err_msg,\n unit_cube=True,\n unit_cube_tol=1500*eps)\n\n # Check with yet larger perturbations\n np.random.seed(4321)\n m = (np.random.rand(grid.shape[0]) < 0.2)\n grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)\n\n tri = qhull.Delaunay(grid)\n self._check_barycentric_transforms(tri, err_msg=err_msg,\n unit_cube=True,\n unit_cube_tol=1e7*eps)\n\n\nclass TestVertexNeighborVertices(object):\n def _check(self, tri):\n expected = [set() for j in range(tri.points.shape[0])]\n for s in tri.simplices:\n for a in s:\n for b in s:\n if a != b:\n expected[a].add(b)\n\n indices, indptr = tri.vertex_neighbor_vertices\n\n got = []\n for j in 
range(tri.points.shape[0]):\n got.append(set(map(int, indptr[indices[j]:indices[j+1]])))\n\n assert_equal(got, expected, err_msg=\"%r != %r\" % (got, expected))\n\n def test_triangle(self):\n points = np.array([(0,0), (0,1), (1,0)], dtype=np.double)\n tri = qhull.Delaunay(points)\n self._check(tri)\n\n def test_rectangle(self):\n points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)\n tri = qhull.Delaunay(points)\n self._check(tri)\n\n def test_complicated(self):\n points = np.array([(0,0), (0,1), (1,1), (1,0),\n (0.5, 0.5), (0.9, 0.5)], dtype=np.double)\n tri = qhull.Delaunay(points)\n self._check(tri)\n\n\nclass TestDelaunay(object):\n \"\"\"\n Check that triangulation works.\n\n \"\"\"\n def test_masked_array_fails(self):\n masked_array = np.ma.masked_all(1)\n assert_raises(ValueError, qhull.Delaunay, masked_array)\n\n def test_nd_simplex(self):\n # simple smoke test: triangulate a n-dimensional simplex\n for nd in xrange(2, 8):\n points = np.zeros((nd+1, nd))\n for j in xrange(nd):\n points[j,j] = 1.0\n points[-1,:] = 1.0\n\n tri = qhull.Delaunay(points)\n\n tri.vertices.sort()\n\n assert_equal(tri.vertices, np.arange(nd+1, dtype=np.int)[None,:])\n assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=np.int)[None,:])\n\n def test_2d_square(self):\n # simple smoke test: 2d square\n points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)\n tri = qhull.Delaunay(points)\n\n assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])\n assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])\n\n def test_duplicate_points(self):\n x = np.array([0, 1, 0, 1], dtype=np.float64)\n y = np.array([0, 0, 1, 1], dtype=np.float64)\n\n xp = np.r_[x, x]\n yp = np.r_[y, y]\n\n # shouldn't fail on duplicate points\n tri = qhull.Delaunay(np.c_[x, y])\n tri2 = qhull.Delaunay(np.c_[xp, yp])\n\n def test_pathological(self):\n # both should succeed\n points = DATASETS['pathological-1']\n tri = qhull.Delaunay(points)\n assert_equal(tri.points[tri.vertices].max(), points.max())\n assert_equal(tri.points[tri.vertices].min(), points.min())\n\n points = DATASETS['pathological-2']\n tri = qhull.Delaunay(points)\n assert_equal(tri.points[tri.vertices].max(), points.max())\n assert_equal(tri.points[tri.vertices].min(), points.min())\n\n def test_joggle(self):\n # Check that the option QJ indeed guarantees that all input points\n # occur as vertices of the triangulation\n\n points = np.random.rand(10, 2)\n points = np.r_[points, points] # duplicate input data\n\n tri = qhull.Delaunay(points, qhull_options=\"QJ Qbb Pp\")\n assert_array_equal(np.unique(tri.simplices.ravel()),\n np.arange(len(points)))\n\n def test_coplanar(self):\n # Check that the coplanar point output option indeed works\n points = np.random.rand(10, 2)\n points = np.r_[points, points] # duplicate input data\n\n tri = qhull.Delaunay(points)\n\n assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)\n assert_(len(tri.coplanar) == len(points)//2)\n\n assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)\n\n assert_(np.all(tri.vertex_to_simplex >= 0))\n\n def test_furthest_site(self):\n points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]\n tri = qhull.Delaunay(points, furthest_site=True)\n\n expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull\n assert_array_equal(tri.simplices, expected)\n\n def test_incremental(self):\n # Test incremental construction of the triangulation\n\n def check(name):\n chunks, opts = INCREMENTAL_DATASETS[name]\n points = np.concatenate(chunks, axis=0)\n\n obj = qhull.Delaunay(chunks[0], 
incremental=True,\n qhull_options=opts)\n for chunk in chunks[1:]:\n obj.add_points(chunk)\n\n obj2 = qhull.Delaunay(points)\n\n obj3 = qhull.Delaunay(chunks[0], incremental=True,\n qhull_options=opts)\n if len(chunks) > 1:\n obj3.add_points(np.concatenate(chunks[1:], axis=0),\n restart=True)\n\n # Check that the incremental mode agrees with upfront mode\n if name.startswith('pathological'):\n # XXX: These produce valid but different triangulations.\n # They look OK when plotted, but how to check them?\n\n assert_array_equal(np.unique(obj.simplices.ravel()),\n np.arange(points.shape[0]))\n assert_array_equal(np.unique(obj2.simplices.ravel()),\n np.arange(points.shape[0]))\n else:\n assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,\n tpl=sorted_tuple)\n\n assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,\n tpl=sorted_tuple)\n\n for name in sorted(INCREMENTAL_DATASETS):\n yield check, name\n\n\ndef assert_hulls_equal(points, facets_1, facets_2):\n # Check that two convex hulls constructed from the same point set\n # are equal\n\n facets_1 = set(map(sorted_tuple, facets_1))\n facets_2 = set(map(sorted_tuple, facets_2))\n\n if facets_1 != facets_2 and points.shape[1] == 2:\n # The direct check fails for the pathological cases\n # --- then the convex hull from Delaunay differs (due\n # to rounding error etc.) from the hull computed\n # otherwise, by the question whether (tricoplanar)\n # points that lie almost exactly on the hull are\n # included as vertices of the hull or not.\n #\n # So we check the result, and accept it if the Delaunay\n # hull line segments are a subset of the usual hull.\n\n eps = 1000 * np.finfo(float).eps\n\n for a, b in facets_1:\n for ap, bp in facets_2:\n t = points[bp] - points[ap]\n t /= np.linalg.norm(t) # tangent\n n = np.array([-t[1], t[0]]) # normal\n\n # check that the two line segments are parallel\n # to the same line\n c1 = np.dot(n, points[b] - points[ap])\n c2 = np.dot(n, points[a] - points[ap])\n if not np.allclose(np.dot(c1, n), 0):\n continue\n if not np.allclose(np.dot(c2, n), 0):\n continue\n\n # Check that the segment (a, b) is contained in (ap, bp)\n c1 = np.dot(t, points[a] - points[ap])\n c2 = np.dot(t, points[b] - points[ap])\n c3 = np.dot(t, points[bp] - points[ap])\n if c1 < -eps or c1 > c3 + eps:\n continue\n if c2 < -eps or c2 > c3 + eps:\n continue\n\n # OK:\n break\n else:\n raise AssertionError(\"comparison fails\")\n\n # it was OK\n return\n\n assert_equal(facets_1, facets_2)\n\n\nclass TestConvexHull:\n def test_masked_array_fails(self):\n masked_array = np.ma.masked_all(1)\n assert_raises(ValueError, qhull.ConvexHull, masked_array)\n\n def test_hull_consistency_tri(self):\n # Check that a convex hull returned by qhull in ndim\n # and the hull constructed from ndim delaunay agree\n def check(name):\n points = DATASETS[name]\n\n tri = qhull.Delaunay(points)\n hull = qhull.ConvexHull(points)\n\n assert_hulls_equal(points, tri.convex_hull, hull.simplices)\n\n # Check that the hull extremes are as expected\n if points.shape[1] == 2:\n assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))\n else:\n assert_equal(np.unique(hull.simplices), hull.vertices)\n\n for name in sorted(DATASETS):\n yield check, name\n\n def test_incremental(self):\n # Test incremental construction of the convex hull\n def check(name):\n chunks, _ = INCREMENTAL_DATASETS[name]\n points = np.concatenate(chunks, axis=0)\n\n obj = qhull.ConvexHull(chunks[0], incremental=True)\n for chunk in chunks[1:]:\n obj.add_points(chunk)\n\n obj2 
= qhull.ConvexHull(points)\n\n obj3 = qhull.ConvexHull(chunks[0], incremental=True)\n if len(chunks) > 1:\n obj3.add_points(np.concatenate(chunks[1:], axis=0),\n restart=True)\n\n # Check that the incremental mode agrees with upfront mode\n assert_hulls_equal(points, obj.simplices, obj2.simplices)\n assert_hulls_equal(points, obj.simplices, obj3.simplices)\n\n for name in sorted(INCREMENTAL_DATASETS):\n yield check, name\n\n def test_vertices_2d(self):\n # The vertices should be in counterclockwise order in 2-D\n np.random.seed(1234)\n points = np.random.rand(30, 2)\n\n hull = qhull.ConvexHull(points)\n assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))\n\n # Check counterclockwiseness\n x, y = hull.points[hull.vertices].T\n angle = np.arctan2(y - y.mean(), x - x.mean())\n assert_(np.all(np.diff(np.unwrap(angle)) > 0))\n\n\nclass TestVoronoi:\n def test_masked_array_fails(self):\n masked_array = np.ma.masked_all(1)\n assert_raises(ValueError, qhull.Voronoi, masked_array)\n\n def test_simple(self):\n # Simple case with known Voronoi diagram\n points = [(0, 0), (0, 1), (0, 2),\n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2)]\n\n # qhull v o Fv Qbb Qc Qz < dat\n output = \"\"\"\n 2\n 5 10 1\n -10.101 -10.101\n 0.5 0.5\n 1.5 0.5\n 0.5 1.5\n 1.5 1.5\n 2 0 1\n 3 3 0 1\n 2 0 3\n 3 2 0 1\n 4 4 3 1 2\n 3 4 0 3\n 2 0 2\n 3 4 0 2\n 2 0 4\n 0\n 12\n 4 0 3 0 1\n 4 0 1 0 1\n 4 1 4 1 3\n 4 1 2 0 3\n 4 2 5 0 3\n 4 3 4 1 2\n 4 3 6 0 2\n 4 4 5 3 4\n 4 4 7 2 4\n 4 5 8 0 4\n 4 6 7 0 2\n 4 7 8 0 4\n \"\"\"\n self._compare_qvoronoi(points, output)\n\n def _compare_qvoronoi(self, points, output, **kw):\n \"\"\"Compare to output from 'qvoronoi o Fv < data' to Voronoi()\"\"\"\n\n # Parse output\n output = [list(map(float, x.split())) for x in output.strip().splitlines()]\n nvertex = int(output[1][0])\n vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf\n nregion = int(output[1][1])\n regions = [[int(y)-1 for y in x[1:]]\n for x in output[2+nvertex:2+nvertex+nregion]]\n nridge = int(output[2+nvertex+nregion][0])\n ridge_points = [[int(y) for y in x[1:3]]\n for x in output[3+nvertex+nregion:]]\n ridge_vertices = [[int(y)-1 for y in x[3:]]\n for x in output[3+nvertex+nregion:]]\n\n # Compare results\n vor = qhull.Voronoi(points, **kw)\n\n def sorttuple(x):\n return tuple(sorted(x))\n\n assert_allclose(vor.vertices, vertices)\n assert_equal(set(map(tuple, vor.regions)),\n set(map(tuple, regions)))\n\n p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))\n p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),\n list(map(sorttuple, vor.ridge_vertices))))\n p1.sort()\n p2.sort()\n\n assert_equal(p1, p2)\n\n def test_ridges(self):\n # Check that the ridges computed by Voronoi indeed separate\n # the regions of nearest neighborhood, by comparing the result\n # to KDTree.\n\n def check(name):\n points = DATASETS[name]\n\n tree = KDTree(points)\n vor = qhull.Voronoi(points)\n\n for p, v in vor.ridge_dict.items():\n # consider only finite ridges\n if not np.all(np.asarray(v) >= 0):\n continue\n\n ridge_midpoint = vor.vertices[v].mean(axis=0)\n d = 1e-6 * (points[p[0]] - ridge_midpoint)\n\n dist, k = tree.query(ridge_midpoint + d, k=1)\n assert_equal(k, p[0])\n\n dist, k = tree.query(ridge_midpoint - d, k=1)\n assert_equal(k, p[1])\n\n for name in DATASETS:\n yield check, name\n\n def test_furthest_site(self):\n points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]\n\n # qhull v o Fv Qbb Qc Qu < dat\n output = \"\"\"\n 2\n 3 5 1\n -10.101 -10.101\n 
0.6000000000000001 0.5\n 0.5 0.6000000000000001\n 3 0 1 2\n 2 0 1\n 2 0 2\n 0\n 3 0 1 2\n 5\n 4 0 2 0 2\n 4 0 1 0 1\n 4 0 4 1 2\n 4 1 4 0 1\n 4 2 4 0 2\n \"\"\"\n self._compare_qvoronoi(points, output, furthest_site=True)\n\n def test_incremental(self):\n # Test incremental construction of the triangulation\n\n def check(name):\n chunks, opts = INCREMENTAL_DATASETS[name]\n points = np.concatenate(chunks, axis=0)\n\n obj = qhull.Voronoi(chunks[0], incremental=True,\n qhull_options=opts)\n for chunk in chunks[1:]:\n obj.add_points(chunk)\n\n obj2 = qhull.Voronoi(points)\n\n obj3 = qhull.Voronoi(chunks[0], incremental=True,\n qhull_options=opts)\n if len(chunks) > 1:\n obj3.add_points(np.concatenate(chunks[1:], axis=0),\n restart=True)\n\n # -- Check that the incremental mode agrees with upfront mode\n\n # The vertices may be in different order or duplicated in\n # the incremental map\n for objx in obj, obj3:\n vertex_map = {-1: -1}\n for i, v in enumerate(objx.vertices):\n for j, v2 in enumerate(obj2.vertices):\n if np.allclose(v, v2):\n vertex_map[i] = j\n\n def remap(x):\n if hasattr(x, '__len__'):\n return tuple(set([remap(y) for y in x]))\n try:\n return vertex_map[x]\n except KeyError:\n raise AssertionError(\"incremental result has spurious vertex at %r\"\n % (objx.vertices[x],))\n\n def simplified(x):\n items = set(map(sorted_tuple, x))\n if () in items:\n items.remove(())\n items = [x for x in items if len(x) > 1]\n items.sort()\n return items\n\n assert_equal(\n simplified(remap(objx.regions)),\n simplified(obj2.regions)\n )\n assert_equal(\n simplified(remap(objx.ridge_vertices)),\n simplified(obj2.ridge_vertices)\n )\n\n # XXX: compare ridge_points --- not clear exactly how to do this\n\n for name in sorted(INCREMENTAL_DATASETS):\n if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:\n # too slow (testing of the result --- qhull is still fast)\n continue\n\n yield check, name\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.einsum",
"numpy.linspace",
"numpy.asarray",
"numpy.all",
"numpy.seterr",
"numpy.concatenate",
"scipy.spatial.qhull.ConvexHull",
"scipy.lib.six.xrange",
"numpy.cross",
"numpy.where",
"scipy.spatial.cKDTree",
"numpy.testing.assert_equal",
"scipy.spatial.qhull.Delaunay",
"scipy.spatial.qhull.Voronoi",
"numpy.ix_",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.finfo",
"numpy.linalg.det",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.isnan",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.testing.assert_allclose",
"scipy.spatial.qhull.tsearch",
"numpy.ma.masked_all",
"numpy.array",
"numpy.testing.run_module_suite",
"numpy.random.seed",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.sort",
"numpy.testing.assert_array_equal",
"numpy.sign",
"numpy.unwrap"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zentralwerkstatt/CLIP | [
"d8dac58224e648cf9aa5bd06c4e3a88152ce15f3"
] | [
"clip/model.py"
] | [
"from collections import OrderedDict\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1):\n super().__init__()\n\n # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1\n self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()\n\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = None\n self.stride = stride\n\n if stride > 1 or inplanes != planes * Bottleneck.expansion:\n # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1\n self.downsample = nn.Sequential(OrderedDict([\n (\"-1\", nn.AvgPool2d(stride)),\n (\"0\", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),\n (\"1\", nn.BatchNorm2d(planes * self.expansion))\n ]))\n\n def forward(self, x: torch.Tensor):\n identity = x\n\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.avgpool(out)\n out = self.bn3(self.conv3(out))\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n return out\n\n\nclass AttentionPool2d(nn.Module):\n def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):\n super().__init__()\n self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\n self.num_heads = num_heads\n\n def forward(self, x):\n x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\n x, _ = F.multi_head_attention_forward(\n query=x, key=x, value=x,\n embed_dim_to_check=x.shape[-1],\n num_heads=self.num_heads,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n in_proj_weight=None,\n in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),\n bias_k=None,\n bias_v=None,\n add_zero_attn=False,\n dropout_p=0,\n out_proj_weight=self.c_proj.weight,\n out_proj_bias=self.c_proj.bias,\n use_separate_proj_weight=True,\n training=self.training,\n need_weights=False\n )\n\n return x[0]\n\n\nclass ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):\n super().__init__()\n self.output_dim = output_dim\n self.input_resolution = input_resolution\n\n # the 
3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.avgpool = nn.AvgPool2d(2)\n self.relu = nn.ReLU(inplace=True)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n def stem(x):\n for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:\n x = self.relu(bn(conv(x)))\n x = self.avgpool(x)\n return x\n\n x = x.type(self.conv1.weight.dtype)\n x = stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x\n\n\nclass LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n ret = super().forward(x.type(torch.float32))\n return ret.type(orig_type)\n\n\nclass QuickGELU(nn.Module):\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)\n\n\nclass ResidualAttentionBlock(nn.Module):\n def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):\n super().__init__()\n\n self.attn = nn.MultiheadAttention(d_model, n_head)\n self.ln_1 = LayerNorm(d_model)\n self.mlp = nn.Sequential(OrderedDict([\n (\"c_fc\", nn.Linear(d_model, d_model * 4)),\n (\"gelu\", QuickGELU()),\n (\"c_proj\", nn.Linear(d_model * 4, d_model))\n ]))\n self.ln_2 = LayerNorm(d_model)\n self.attn_mask = attn_mask\n\n def attention(self, x: torch.Tensor):\n self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None\n return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]\n\n def forward(self, x: torch.Tensor):\n x = x + self.attention(self.ln_1(x))\n x = x + self.mlp(self.ln_2(x))\n return x\n\n\nclass Transformer(nn.Module):\n def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):\n super().__init__()\n self.width = width\n self.layers = layers\n self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])\n\n def forward(self, x: torch.Tensor):\n return self.resblocks(x)\n\n\nclass VisionTransformer(nn.Module):\n def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):\n super().__init__()\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale 
* torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.ln_pre = LayerNorm(width)\n\n self.transformer = Transformer(width, layers, heads)\n\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.positional_embedding.to(x.dtype)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.ln_post(x[:, 0, :])\n\n if self.proj is not None:\n x = x @ self.proj\n\n return x\n\n\nclass CLIP(nn.Module):\n def __init__(self,\n embed_dim: int,\n # vision\n image_resolution: int,\n vision_layers: Union[Tuple[int, int, int, int], int],\n vision_width: int,\n vision_patch_size: int,\n # text\n context_length: int,\n vocab_size: int,\n transformer_width: int,\n transformer_heads: int,\n transformer_layers: int\n ):\n super().__init__()\n\n self.context_length = context_length\n\n if isinstance(vision_layers, (tuple, list)):\n vision_heads = vision_width * 32 // 64\n self.visual = ModifiedResNet(\n layers=vision_layers,\n output_dim=embed_dim,\n heads=vision_heads,\n input_resolution=image_resolution,\n width=vision_width\n )\n else:\n vision_heads = vision_width // 64\n self.visual = VisionTransformer(\n input_resolution=image_resolution,\n patch_size=vision_patch_size,\n width=vision_width,\n layers=vision_layers,\n heads=vision_heads,\n output_dim=embed_dim\n )\n\n self.transformer = Transformer(\n width=transformer_width,\n layers=transformer_layers,\n heads=transformer_heads,\n attn_mask=self.build_attention_mask()\n )\n\n self.vocab_size = vocab_size\n self.token_embedding = nn.Embedding(vocab_size, transformer_width)\n self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))\n self.ln_final = LayerNorm(transformer_width)\n\n self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n self.initialize_parameters()\n\n def initialize_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n\n if isinstance(self.visual, ModifiedResNet):\n if self.visual.attnpool is not None:\n std = self.visual.attnpool.c_proj.in_features ** -0.5\n nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)\n nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)\n\n for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:\n for name, param in resnet_block.named_parameters():\n if name.endswith(\"bn3.weight\"):\n nn.init.zeros_(param)\n\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n 
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n @property\n def dtype(self):\n return self.visual.conv1.weight.dtype\n\n def encode_image(self, image):\n return self.visual(image.type(self.dtype))\n\n def encode_text(self, text):\n x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.type(self.dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x).type(self.dtype)\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n\n return x\n\n def forward(self, image, text):\n image_features = self.encode_image(image)\n text_features = self.encode_text(text)\n\n # normalized features\n image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n text_features = text_features / text_features.norm(dim=-1, keepdim=True)\n\n # cosine similarity as logits\n logit_scale = self.logit_scale.exp()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logits_per_image.t()\n\n # shape = [global_batch_size, global_batch_size]\n return logits_per_image, logits_per_text\n\n\ndef convert_weights(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n model.apply(_convert_weights_to_fp16)\n\n\ndef build_model(state_dict: dict):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\"visual.\") and k.endswith(\".attn.in_proj_weight\")])\n vision_patch_size = state_dict[\"visual.conv1.weight\"].shape[-1]\n grid_size = round((state_dict[\"visual.positional_embedding\"].shape[0] - 1) ** 0.5)\n image_resolution = vision_patch_size * grid_size\n else:\n counts: list = [len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"visual.layer{b}\"))) for b in [1, 2, 3, 4]]\n vision_layers = tuple(counts)\n vision_width = state_dict[\"visual.layer1.0.conv1.weight\"].shape[0]\n output_width = round((state_dict[\"visual.attnpool.positional_embedding\"].shape[0] - 1) ** 0.5)\n vision_patch_size = None\n assert output_width ** 2 + 1 == 
state_dict[\"visual.attnpool.positional_embedding\"].shape[0]\n image_resolution = output_width * 32\n\n embed_dim = state_dict[\"text_projection\"].shape[1]\n context_length = state_dict[\"positional_embedding\"].shape[0]\n vocab_size = state_dict[\"token_embedding.weight\"].shape[0]\n transformer_width = state_dict[\"ln_final.weight\"].shape[0]\n transformer_heads = transformer_width // 64\n transformer_layers = len(set(k.split(\".\")[2] for k in state_dict if k.startswith(f\"transformer.resblocks\")))\n\n model = CLIP(\n embed_dim,\n image_resolution, vision_layers, vision_width, vision_patch_size,\n context_length, vocab_size, transformer_width, transformer_heads, transformer_layers\n )\n\n for key in [\"input_resolution\", \"context_length\", \"vocab_size\"]:\n if key in state_dict:\n del state_dict[key]\n\n convert_weights(model)\n model.load_state_dict(state_dict)\n return model.eval()\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.randn",
"torch.arange",
"torch.nn.Sequential",
"torch.sigmoid",
"numpy.log",
"torch.empty",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.nn.BatchNorm2d",
"torch.nn.Identity",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
allanchua101/ipynta | [
"861c36b1c2d675611fcd5ed478d658f8180d03af"
] | [
"src/ipynta/transform/vflip.py"
] | [
"from .base import BaseTransform\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\nclass VFlipTransform(BaseTransform):\r\n \"\"\"Class used for creating a vertical flipped copies of images.\"\"\"\r\n \r\n def __init__(self):\r\n \"\"\"Constructs an instance of VFlipTransform.\"\"\"\r\n BaseTransform.__init__(self)\r\n\r\n def execute(self, img_list):\r\n \"\"\"Method used for creating vertically flipped copies of images\r\n \r\n Args:\r\n img_list list[PIL.Image]: A list of Pillow images to be used as seed image set.\r\n \r\n Returns:\r\n list[PIL.Image]: List of transformed images.\r\n \"\"\"\r\n if (img_list is None):\r\n return []\r\n\r\n output = []\r\n \r\n for img in img_list:\r\n tmp = img.copy()\r\n tmp = np.array(tmp)\r\n tmp = np.flipud(tmp)\r\n output.append(Image.fromarray(tmp))\r\n\r\n return output"
] | [
[
"numpy.flipud",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
m---w/pya | [
"5f3290842db95b722f6c41a97f153352da25353f"
] | [
"tests/test_ugen.py"
] | [
"from unittest import TestCase\nfrom pya import *\nimport numpy as np\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass TestUgen(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_sine(self):\n sine = Ugen().sine(freq=200, amp=0.5, dur=1.0, sr=44100 // 2, channels=2)\n self.assertEqual(44100 // 2, sine.sr)\n self.assertAlmostEqual(0.5, np.max(sine.sig), places=6)\n self.assertEqual((44100 // 2, 2), sine.sig.shape)\n\n def test_cos(self):\n cos = Ugen().cos(freq=200, amp=0.5, dur=1.0, sr=44100 // 2, channels=2)\n self.assertEqual(44100 // 2, cos.sr)\n self.assertAlmostEqual(0.5, np.max(cos.sig), places=6)\n self.assertEqual((44100 // 2, 2), cos.sig.shape)\n\n def test_square(self):\n square = Ugen().square(freq=200, amp=0.5, dur=1.0, sr=44100 // 2, channels=2)\n self.assertEqual(44100 // 2, square.sr)\n self.assertAlmostEqual(0.5, np.max(square.sig), places=6)\n self.assertEqual((44100 // 2, 2), square.sig.shape)\n\n def test_sawooth(self):\n saw = Ugen().sawtooth(freq=200, amp=0.5, dur=1.0, sr=44100 // 2, channels=2)\n self.assertEqual(44100 // 2, saw.sr)\n self.assertAlmostEqual(0.5, np.max(saw.sig), places=6)\n self.assertEqual((44100 // 2, 2), saw.sig.shape)\n\n def test_noise(self):\n white = Ugen().noise(type=\"white\")\n pink = Ugen().noise(type=\"pink\")\n"
] | [
[
"numpy.max"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jasmine216/Project_vmaf | [
"60b31d208600806939103dfbe41216f98c56628e"
] | [
"python/vmaf/core/feature_extractor.py"
] | [
"from abc import ABCMeta, abstractmethod\nfrom xml.etree import ElementTree\n\nfrom vmaf.tools.decorator import override\n\n__copyright__ = \"Copyright 2016-2020, Netflix, Inc.\"\n__license__ = \"BSD+Patent\"\n\nimport re\nimport numpy as np\nimport ast\n\nfrom vmaf import ExternalProgramCaller\nfrom vmaf.core.executor import Executor\nfrom vmaf.core.result import Result\nfrom vmaf.tools.reader import YuvReader\n\n\nclass FeatureExtractor(Executor):\n \"\"\"\n FeatureExtractor takes in a list of assets, and run feature extraction on\n them, and return a list of corresponding results. A FeatureExtractor must\n specify a unique type and version combination (by the TYPE and VERSION\n attribute), so that the Result generated by it can be identified.\n\n A derived class of FeatureExtractor must:\n 1) Override TYPE and VERSION\n 2) Override _generate_result(self, asset), which call a\n command-line executable and generate feature scores in a log file.\n 3) Override _get_feature_scores(self, asset), which read the feature\n scores from the log file, and return the scores in a dictionary format.\n For an example, follow VmafFeatureExtractor.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n @property\n @abstractmethod\n def ATOM_FEATURES(self):\n raise NotImplementedError\n\n def _read_result(self, asset):\n result = {}\n result.update(self._get_feature_scores(asset))\n executor_id = self.executor_id\n return Result(asset, executor_id, result)\n\n @classmethod\n def get_scores_key(cls, atom_feature):\n return \"{type}_{atom_feature}_scores\".format(\n type=cls.TYPE, atom_feature=atom_feature)\n\n @classmethod\n def get_score_key(cls, atom_feature):\n return \"{type}_{atom_feature}_score\".format(\n type=cls.TYPE, atom_feature=atom_feature)\n\n def _get_feature_scores(self, asset):\n # routine to read the feature scores from the log file, and return\n # the scores in a dictionary format.\n\n log_file_path = self._get_log_file_path(asset)\n\n atom_feature_scores_dict = {}\n atom_feature_idx_dict = {}\n for atom_feature in self.ATOM_FEATURES:\n atom_feature_scores_dict[atom_feature] = []\n atom_feature_idx_dict[atom_feature] = 0\n\n with open(log_file_path, 'rt') as log_file:\n for line in log_file.readlines():\n for atom_feature in self.ATOM_FEATURES:\n re_template = \"{af}: ([0-9]+) ([a-zA-Z0-9.-]+)\".format(af=atom_feature)\n mo = re.match(re_template, line)\n if mo:\n\n cur_idx = int(mo.group(1))\n assert cur_idx == atom_feature_idx_dict[atom_feature]\n\n # parse value, allowing NaN and inf\n val = float(mo.group(2))\n if np.isnan(val) or np.isinf(val):\n val = None\n\n atom_feature_scores_dict[atom_feature].append(val)\n atom_feature_idx_dict[atom_feature] += 1\n continue\n\n len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])\n assert len_score != 0\n for atom_feature in self.ATOM_FEATURES[1:]:\n assert len_score == len(atom_feature_scores_dict[atom_feature]), \\\n \"Feature data possibly corrupt. 
Run cleanup script and try again.\"\n\n feature_result = {}\n\n for atom_feature in self.ATOM_FEATURES:\n scores_key = self.get_scores_key(atom_feature)\n feature_result[scores_key] = atom_feature_scores_dict[atom_feature]\n\n return feature_result\n\n\nclass VmafrcFeatureExtractorMixin(object):\n\n @override(FeatureExtractor)\n def _get_feature_scores(self, asset):\n\n assert hasattr(self, '_get_log_file_path')\n assert hasattr(self, 'ATOM_FEATURES')\n assert hasattr(self, 'ATOM_FEATURES_TO_VMAFRC_KEY_DICT')\n assert hasattr(self, 'get_scores_key')\n\n log_file_path = self._get_log_file_path(asset)\n tree = ElementTree.parse(log_file_path)\n root = tree.getroot()\n\n feature_scores = [[] for _ in self.ATOM_FEATURES]\n\n for frame in root.findall('frames/frame'):\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n try:\n feature_scores[i_feature].append(float(frame.attrib[self.ATOM_FEATURES_TO_VMAFRC_KEY_DICT[feature]]))\n except KeyError:\n pass # some features may be missing\n\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n assert len(feature_scores[i_feature]) != 0\n assert len(feature_scores[i_feature]) == len(feature_scores[0])\n\n feature_result = {}\n for i_feature, feature in enumerate(self.ATOM_FEATURES):\n feature_result[self.get_scores_key(feature)] = feature_scores[i_feature]\n\n return feature_result\n\n\nclass VmafFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"VMAF_feature\"\n\n # VERSION = '0.1' # vmaf_study; Anush's VIF fix\n # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr\n # VERSION = '0.2.1' # expose vif num/den of each scale\n # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case\n # VERSION = '0.2.2b' # expose adm_den/num_scalex\n # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef\n # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step\n # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2\n # VERSION = '0.2.4c' # Modify by moving motion2 to c code\n # VERSION = '0.2.5' # replace executable vmaf_feature with vmaf_rc\n # VERSION = '0.2.6' # incorporate adm_enhn_gain_limit and vif_enhn_gain_limit\n VERSION = '0.2.7' # move vif_enhn_gain_limit right before log calculation\n\n ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',\n 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',\n 'vif_num_scale0', 'vif_den_scale0',\n 'vif_num_scale1', 'vif_den_scale1',\n 'vif_num_scale2', 'vif_den_scale2',\n 'vif_num_scale3', 'vif_den_scale3',\n 'adm_num_scale0', 'adm_den_scale0',\n 'adm_num_scale1', 'adm_den_scale1',\n 'adm_num_scale2', 'adm_den_scale2',\n 'adm_num_scale3', 'adm_den_scale3',\n ]\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = dict(zip(ATOM_FEATURES, ATOM_FEATURES))\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT['ansnr'] = 'float_ansnr'\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT['anpsnr'] = 'float_anpsnr'\n\n DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',\n 'vif2', 'adm2', 'adm3',\n 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',\n ]\n\n ADM2_CONSTANT = 0\n ADM_SCALE_CONSTANT = 0\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate feature\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n 
yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n obj_path = asset.obj_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmafrc_multi_features(\n ['float_adm', 'float_vif', 'float_motion', 'float_ansnr'],\n yuv_type, ref_path, dis_path, w, h, log_file_path, logger, options={\n 'float_adm': {'debug': True},\n 'float_vif': {'debug': True},\n 'float_motion': {'debug': True},\n },obj_path=obj_path\n )\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(VmafFeatureExtractor, cls)._post_process_result(result)\n\n # adm2 =\n # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)\n adm2_scores_key = cls.get_scores_key('adm2')\n adm_num_scores_key = cls.get_scores_key('adm_num')\n adm_den_scores_key = cls.get_scores_key('adm_den')\n result.result_dict[adm2_scores_key] = list(\n (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /\n (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)\n )\n\n # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3\n vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')\n vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')\n vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')\n vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')\n vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')\n vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')\n vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')\n vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')\n vif_scale0_scores_key = cls.get_scores_key('vif_scale0')\n vif_scale1_scores_key = cls.get_scores_key('vif_scale1')\n vif_scale2_scores_key = cls.get_scores_key('vif_scale2')\n vif_scale3_scores_key = cls.get_scores_key('vif_scale3')\n result.result_dict[vif_scale0_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale0_scores_key])\n / np.array(result.result_dict[vif_den_scale0_scores_key]))\n )\n result.result_dict[vif_scale1_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale1_scores_key])\n / np.array(result.result_dict[vif_den_scale1_scores_key]))\n )\n result.result_dict[vif_scale2_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale2_scores_key])\n / np.array(result.result_dict[vif_den_scale2_scores_key]))\n )\n result.result_dict[vif_scale3_scores_key] = list(\n (np.array(result.result_dict[vif_num_scale3_scores_key])\n / np.array(result.result_dict[vif_den_scale3_scores_key]))\n )\n\n # vif2 =\n # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +\n # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0\n vif_scores_key = cls.get_scores_key('vif2')\n result.result_dict[vif_scores_key] = list(\n (\n (np.array(result.result_dict[vif_num_scale0_scores_key])\n / np.array(result.result_dict[vif_den_scale0_scores_key])) +\n (np.array(result.result_dict[vif_num_scale1_scores_key])\n / np.array(result.result_dict[vif_den_scale1_scores_key])) +\n (np.array(result.result_dict[vif_num_scale2_scores_key])\n / np.array(result.result_dict[vif_den_scale2_scores_key])) +\n (np.array(result.result_dict[vif_num_scale3_scores_key])\n / np.array(result.result_dict[vif_den_scale3_scores_key]))\n ) / 4.0\n )\n\n # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3\n adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')\n 
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')\n adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')\n adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')\n adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')\n adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')\n adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')\n adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')\n adm_scale0_scores_key = cls.get_scores_key('adm_scale0')\n adm_scale1_scores_key = cls.get_scores_key('adm_scale1')\n adm_scale2_scores_key = cls.get_scores_key('adm_scale2')\n adm_scale3_scores_key = cls.get_scores_key('adm_scale3')\n result.result_dict[adm_scale0_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale1_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale2_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n result.result_dict[adm_scale3_scores_key] = list(\n (np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n )\n\n # adm3 = \\\n # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))\n # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0\n adm3_scores_key = cls.get_scores_key('adm3')\n result.result_dict[adm3_scores_key] = list(\n (\n ((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +\n ((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)\n / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))\n ) / 4.0\n )\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass VifFrameDifferenceFeatureExtractor(FeatureExtractor):\n\n TYPE = \"VifDiff_feature\"\n\n VERSION = '0.1'\n\n ATOM_FEATURES = ['vifdiff',\n 'vifdiff_num', 'vifdiff_den',\n 'vifdiff_num_scale0', 'vifdiff_den_scale0',\n 'vifdiff_num_scale1', 'vifdiff_den_scale1',\n 'vifdiff_num_scale2', 'vifdiff_den_scale2',\n 'vifdiff_num_scale3', 'vifdiff_den_scale3',\n ]\n\n DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',\n ]\n\n ADM2_CONSTANT = 0\n ADM_SCALE_CONSTANT = 0\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate 
feature\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)\n\n # vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3\n vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')\n vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')\n vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')\n vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')\n vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')\n vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')\n vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')\n vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')\n vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')\n vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')\n vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')\n vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')\n result.result_dict[vifdiff_scale0_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale0_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale0_scores_key]))\n )\n result.result_dict[vifdiff_scale1_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale1_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale1_scores_key]))\n )\n result.result_dict[vifdiff_scale2_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale2_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale2_scores_key]))\n )\n result.result_dict[vifdiff_scale3_scores_key] = list(\n (np.array(result.result_dict[vifdiff_num_scale3_scores_key])\n / np.array(result.result_dict[vifdiff_den_scale3_scores_key]))\n )\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass PsnrFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"PSNR_feature\"\n # VERSION = \"1.0\"\n VERSION = \"1.1\" # call vmaf_rc to replace standalone psnr exec\n\n ATOM_FEATURES = ['psnr']\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'psnr': 'float_psnr',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n obj_path=asset.obj_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmafrc_single_feature('float_psnr', yuv_type, ref_path, dis_path,w, h, log_file_path, logger,obj_path=obj_path)\n\n\nclass MomentFeatureExtractor(FeatureExtractor):\n\n TYPE = \"Moment_feature\"\n\n # VERSION = \"1.0\" # call executable\n VERSION = \"1.1\" # python only\n\n ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]\n\n 
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate feature\n # scores in the log file.\n\n quality_w, quality_h = asset.quality_width_height\n\n ref_scores_mtx = None\n with YuvReader(filepath=asset.ref_procfile_path, width=quality_w, height=quality_h,\n yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:\n scores_mtx_list = []\n i = 0\n for ref_yuv in ref_yuv_reader:\n ref_y = ref_yuv[0]\n ref_y = ref_y.astype(np.double)\n firstm = ref_y.mean()\n secondm = ref_y.var() + firstm**2\n scores_mtx_list.append(np.hstack(([firstm], [secondm])))\n i += 1\n ref_scores_mtx = np.vstack(scores_mtx_list)\n\n dis_scores_mtx = None\n with YuvReader(filepath=asset.dis_procfile_path, width=quality_w, height=quality_h,\n yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:\n scores_mtx_list = []\n i = 0\n for dis_yuv in dis_yuv_reader:\n dis_y = dis_yuv[0]\n dis_y = dis_y.astype(np.double)\n firstm = dis_y.mean()\n secondm = dis_y.var() + firstm**2\n scores_mtx_list.append(np.hstack(([firstm], [secondm])))\n i += 1\n dis_scores_mtx = np.vstack(scores_mtx_list)\n\n assert ref_scores_mtx is not None and dis_scores_mtx is not None\n\n log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),\n 'dis_scores_mtx': dis_scores_mtx.tolist()}\n\n log_file_path = self._get_log_file_path(asset)\n with open(log_file_path, 'wt') as log_file:\n log_file.write(str(log_dict))\n\n def _get_feature_scores(self, asset):\n # routine to read the feature scores from the log file, and return\n # the scores in a dictionary format.\n\n log_file_path = self._get_log_file_path(asset)\n\n with open(log_file_path, 'rt') as log_file:\n log_str = log_file.read()\n log_dict = ast.literal_eval(log_str)\n ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])\n dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])\n\n _, num_ref_features = ref_scores_mtx.shape\n assert num_ref_features == 2 # ref1st, ref2nd\n _, num_dis_features = dis_scores_mtx.shape\n assert num_dis_features == 2 # dis1st, dis2nd\n\n feature_result = {}\n feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])\n feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])\n feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])\n feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])\n\n return feature_result\n\n @classmethod\n @override(Executor)\n def _post_process_result(cls, result):\n\n result = super(MomentFeatureExtractor, cls)._post_process_result(result)\n\n # calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd\n refvar_scores_key = cls.get_scores_key('refvar')\n ref1st_scores_key = cls.get_scores_key('ref1st')\n ref2nd_scores_key = cls.get_scores_key('ref2nd')\n disvar_scores_key = cls.get_scores_key('disvar')\n dis1st_scores_key = cls.get_scores_key('dis1st')\n dis2nd_scores_key = cls.get_scores_key('dis2nd')\n get_var = lambda m: m[1] - m[0] * m[0]\n result.result_dict[refvar_scores_key] = \\\n list(map(get_var, zip(result.result_dict[ref1st_scores_key],\n result.result_dict[ref2nd_scores_key])))\n result.result_dict[disvar_scores_key] = \\\n list(map(get_var, zip(result.result_dict[dis1st_scores_key],\n result.result_dict[dis2nd_scores_key])))\n\n # validate\n for feature in cls.DERIVED_ATOM_FEATURES:\n assert cls.get_scores_key(feature) in result.result_dict\n\n return result\n\n\nclass SsimFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n 
TYPE = \"SSIM_feature\"\n # VERSION = \"1.0\"\n # VERSION = \"1.1\" # fix OPT_RANGE_PIXEL_OFFSET = 0\n # VERSION = \"1.2\" # call vmaf_rc to replace standalone ssim exec\n VERSION = \"1.3\" # add ssim_l, ssim_c, ssim_s as optional output\n\n ATOM_FEATURES = ['ssim',\n 'ssim_l', 'ssim_c', 'ssim_s',\n ]\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'ssim': 'float_ssim',\n 'ssim_l': 'float_ssim_l',\n 'ssim_c': 'float_ssim_c',\n 'ssim_s': 'float_ssim_s',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmafrc_single_feature('float_ssim', yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger, options={'enable_lcs': True})\n\n\nclass MsSsimFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"MS_SSIM_feature\"\n # VERSION = \"1.0\"\n # VERSION = \"1.1\" # fix OPT_RANGE_PIXEL_OFFSET = 0\n # VERSION = \"1.2\" # call vmaf_rc to replace standalone ms_ssim exec\n VERSION = \"1.3\" # add ssim_l_scalex, ssim_c_scalex, ssim_s_scalex as optional output\n\n ATOM_FEATURES = ['ms_ssim',\n 'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',\n 'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',\n 'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',\n 'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',\n 'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',\n ]\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'ms_ssim': 'float_ms_ssim',\n\n 'ms_ssim_l_scale0': 'float_ms_ssim_l_scale0',\n 'ms_ssim_l_scale1': 'float_ms_ssim_l_scale1',\n 'ms_ssim_l_scale2': 'float_ms_ssim_l_scale2',\n 'ms_ssim_l_scale3': 'float_ms_ssim_l_scale3',\n 'ms_ssim_l_scale4': 'float_ms_ssim_l_scale4',\n\n 'ms_ssim_c_scale0': 'float_ms_ssim_c_scale0',\n 'ms_ssim_c_scale1': 'float_ms_ssim_c_scale1',\n 'ms_ssim_c_scale2': 'float_ms_ssim_c_scale2',\n 'ms_ssim_c_scale3': 'float_ms_ssim_c_scale3',\n 'ms_ssim_c_scale4': 'float_ms_ssim_c_scale4',\n\n 'ms_ssim_s_scale0': 'float_ms_ssim_s_scale0',\n 'ms_ssim_s_scale1': 'float_ms_ssim_s_scale1',\n 'ms_ssim_s_scale2': 'float_ms_ssim_s_scale2',\n 'ms_ssim_s_scale3': 'float_ms_ssim_s_scale3',\n 'ms_ssim_s_scale4': 'float_ms_ssim_s_scale4',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmafrc_single_feature('float_ms_ssim', yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger, options={'enable_lcs': True})\n\n\nclass AnsnrFeatureExtractor(VmafrcFeatureExtractorMixin, FeatureExtractor):\n\n TYPE = \"ANSNR_feature\"\n VERSION = \"1.0\"\n\n ATOM_FEATURES = ['ansnr', 'anpsnr']\n\n ATOM_FEATURES_TO_VMAFRC_KEY_DICT = {\n 'ansnr': 'float_ansnr',\n 'anpsnr': 'float_anpsnr',\n }\n\n def _generate_result(self, asset):\n # routine to call the command-line executable and generate quality\n # scores in the log file.\n\n quality_width, 
quality_height = asset.quality_width_height\n log_file_path = self._get_log_file_path(asset)\n\n yuv_type=self._get_workfile_yuv_type(asset)\n ref_path=asset.ref_procfile_path\n dis_path=asset.dis_procfile_path\n w=quality_width\n h=quality_height\n logger = self.logger\n\n ExternalProgramCaller.call_vmafrc_single_feature('float_ansnr', yuv_type, ref_path, dis_path, w, h, log_file_path, logger)\n"
] | [
[
"numpy.hstack",
"numpy.isnan",
"numpy.array",
"numpy.isinf",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HOUYONGKUO/D3Feat | [
"d005f3811c12764c16d4f5e9a01c6720e7e72392"
] | [
"geometric_registration/utils.py"
] | [
"import os\nimport open3d\nimport numpy as np\n\n\ndef get_pcd(pcdpath, filename):\n return open3d.read_point_cloud(os.path.join(pcdpath, filename + '.ply'))\n\n\ndef get_keypts(keyptspath, filename):\n keypts = np.load(os.path.join(keyptspath, filename + f'.npy'))\n return keypts\n\n\ndef get_desc(descpath, filename, desc_name):\n desc = np.load(os.path.join(descpath, filename + f'.{desc_name}.npy'))\n return desc\n\n\ndef loadlog(gtpath):\n with open(os.path.join(gtpath, 'gt.log')) as f:\n content = f.readlines()\n result = {}\n i = 0\n while i < len(content):\n line = content[i].replace(\"\\n\", \"\").split(\"\\t\")[0:3]\n trans = np.zeros([4, 4])\n trans[0] = [float(x) for x in content[i + 1].replace(\"\\n\", \"\").split(\"\\t\")[0:4]]\n trans[1] = [float(x) for x in content[i + 2].replace(\"\\n\", \"\").split(\"\\t\")[0:4]]\n trans[2] = [float(x) for x in content[i + 3].replace(\"\\n\", \"\").split(\"\\t\")[0:4]]\n trans[3] = [float(x) for x in content[i + 4].replace(\"\\n\", \"\").split(\"\\t\")[0:4]]\n i = i + 5\n result[f'{int(line[0])}_{int(line[1])}'] = trans\n\n return result\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ludaavics/numba | [
"d630c1cc7f261365aa92f6e3437abaaa185d8482",
"d630c1cc7f261365aa92f6e3437abaaa185d8482"
] | [
"numba/tests/test_tuples.py",
"numba/cuda/tests/cudapy/test_sm_creation.py"
] | [
"import collections\nimport itertools\n\nimport numpy as np\n\nfrom numba.core.compiler import compile_isolated\nfrom numba import njit, jit, typeof\nfrom numba.core import types, errors, utils\nfrom numba.tests.support import TestCase, MemoryLeakMixin, tag\nimport unittest\n\n\nRect = collections.namedtuple('Rect', ('width', 'height'))\n\nPoint = collections.namedtuple('Point', ('x', 'y', 'z'))\n\nPoint2 = collections.namedtuple('Point2', ('x', 'y', 'z'))\n\nEmpty = collections.namedtuple('Empty', ())\n\ndef tuple_return_usecase(a, b):\n return a, b\n\ndef tuple_first(tup):\n a, b = tup\n return a\n\ndef tuple_second(tup):\n a, b = tup\n return b\n\ndef tuple_index(tup, idx):\n return tup[idx]\n\ndef tuple_index_static(tup):\n # Note the negative index\n return tup[-2]\n\ndef tuple_slice2(tup):\n return tup[1:-1]\n\ndef tuple_slice3(tup):\n return tup[1::2]\n\ndef len_usecase(tup):\n return len(tup)\n\ndef add_usecase(a, b):\n return a + b\n\ndef eq_usecase(a, b):\n return a == b\n\ndef ne_usecase(a, b):\n return a != b\n\ndef gt_usecase(a, b):\n return a > b\n\ndef ge_usecase(a, b):\n return a >= b\n\ndef lt_usecase(a, b):\n return a < b\n\ndef le_usecase(a, b):\n return a <= b\n\ndef in_usecase(a, b):\n return a in b\n\ndef bool_usecase(tup):\n return bool(tup), (3 if tup else 2)\n\ndef getattr_usecase(tup):\n return tup.z, tup.y, tup.x\n\ndef make_point(a, b, c):\n return Point(a, b, c)\n\ndef make_point_kws(a, b, c):\n return Point(z=c, y=b, x=a)\n\ndef make_point_nrt(n):\n r = Rect(list(range(n)), np.zeros(n + 1))\n # This also exercises attribute access\n p = Point(r, len(r.width), len(r.height))\n return p\n\ndef type_usecase(tup, *args):\n return type(tup)(*args)\n\ndef identity(tup):\n return tup\n\ndef index_method_usecase(tup, value):\n return tup.index(value)\n\ndef tuple_unpack_static_getitem_err():\n # see issue3895, `c` is imprecise\n a, b, c, d = [], [], [], 0.0\n a.append(1)\n b.append(1)\n return\n\n\nclass TestTupleLengthError(unittest.TestCase):\n\n def test_tuple_length_error(self):\n # issue 2195\n # raise an error on tuples greater than 1000 in length\n @njit\n def eattuple(tup):\n return len(tup)\n\n with self.assertRaises(errors.UnsupportedError) as raises:\n tup = tuple(range(1001))\n eattuple(tup)\n\n expected = \"Tuple 'tup' length must be smaller than 1000\"\n self.assertIn(expected, str(raises.exception))\n\nclass TestTupleTypeNotIterable(unittest.TestCase):\n '''\n issue 4369\n raise an error if 'type' is not iterable\n '''\n def test_namedtuple_types_exception(self):\n with self.assertRaises(errors.TypingError) as raises:\n types.NamedTuple(types.uint32, 'p')\n self.assertIn(\n \"Argument 'types' is not iterable\",\n str(raises.exception)\n )\n\n def test_tuple_types_exception(self):\n with self.assertRaises(errors.TypingError) as raises:\n types.Tuple((types.uint32))\n self.assertIn(\n \"Argument 'types' is not iterable\",\n str(raises.exception)\n )\n\n\nclass TestTupleReturn(TestCase):\n\n def test_array_tuple(self):\n aryty = types.Array(types.float64, 1, 'C')\n cres = compile_isolated(tuple_return_usecase, (aryty, aryty))\n a = b = np.arange(5, dtype='float64')\n ra, rb = cres.entry_point(a, b)\n self.assertPreciseEqual(ra, a)\n self.assertPreciseEqual(rb, b)\n del a, b\n self.assertPreciseEqual(ra, rb)\n\n def test_scalar_tuple(self):\n scalarty = types.float32\n cres = compile_isolated(tuple_return_usecase, (scalarty, scalarty))\n a = b = 1\n ra, rb = cres.entry_point(a, b)\n self.assertEqual(ra, a)\n self.assertEqual(rb, b)\n\n def 
test_hetero_tuple(self):\n alltypes = []\n allvalues = []\n\n alltypes.append((types.int32, types.int64))\n allvalues.append((1, 2))\n\n alltypes.append((types.float32, types.float64))\n allvalues.append((1.125, .25))\n\n alltypes.append((types.int32, types.float64))\n allvalues.append((1231, .5))\n\n for (ta, tb), (a, b) in zip(alltypes, allvalues):\n cres = compile_isolated(tuple_return_usecase, (ta, tb))\n ra, rb = cres.entry_point(a, b)\n self.assertPreciseEqual((ra, rb), (a, b))\n\n\nclass TestTuplePassing(TestCase):\n\n def test_unituple(self):\n tuple_type = types.UniTuple(types.int32, 2)\n cr_first = compile_isolated(tuple_first, (tuple_type,))\n cr_second = compile_isolated(tuple_second, (tuple_type,))\n self.assertPreciseEqual(cr_first.entry_point((4, 5)), 4)\n self.assertPreciseEqual(cr_second.entry_point((4, 5)), 5)\n\n def test_hetero_tuple(self):\n tuple_type = types.Tuple((types.int64, types.float32))\n cr_first = compile_isolated(tuple_first, (tuple_type,))\n cr_second = compile_isolated(tuple_second, (tuple_type,))\n self.assertPreciseEqual(cr_first.entry_point((2**61, 1.5)), 2**61)\n self.assertPreciseEqual(cr_second.entry_point((2**61, 1.5)), 1.5)\n\n def test_size_mismatch(self):\n # Issue #1638: tuple size should be checked when unboxing\n tuple_type = types.UniTuple(types.int32, 2)\n cr = compile_isolated(tuple_first, (tuple_type,))\n with self.assertRaises(ValueError) as raises:\n cr.entry_point((4, 5, 6))\n self.assertEqual(str(raises.exception),\n \"size mismatch for tuple, expected 2 element(s) but got 3\")\n\n\nclass TestOperations(TestCase):\n\n def test_len(self):\n pyfunc = len_usecase\n cr = compile_isolated(pyfunc,\n [types.Tuple((types.int64, types.float32))])\n self.assertPreciseEqual(cr.entry_point((4, 5)), 2)\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 3)])\n self.assertPreciseEqual(cr.entry_point((4, 5, 6)), 3)\n\n def test_index(self):\n pyfunc = tuple_index\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 3), types.int64])\n tup = (4, 3, 6)\n for i in range(len(tup)):\n self.assertPreciseEqual(cr.entry_point(tup, i), tup[i])\n\n # test negative indexing\n for i in range(len(tup) + 1):\n self.assertPreciseEqual(cr.entry_point(tup, -i), tup[-i])\n\n # oob indexes, +ve then -ve\n with self.assertRaises(IndexError) as raises:\n cr.entry_point(tup, len(tup))\n self.assertEqual(\"tuple index out of range\", str(raises.exception))\n with self.assertRaises(IndexError) as raises:\n cr.entry_point(tup, -(len(tup) + 1))\n self.assertEqual(\"tuple index out of range\", str(raises.exception))\n\n # Test empty tuple\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 0), types.int64])\n with self.assertRaises(IndexError) as raises:\n cr.entry_point((), 0)\n self.assertEqual(\"tuple index out of range\", str(raises.exception))\n\n # test uintp indexing (because, e.g., parfor generates unsigned prange)\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 3), types.uintp])\n for i in range(len(tup)):\n self.assertPreciseEqual(cr.entry_point(tup, types.uintp(i)), tup[i])\n\n # With a compile-time static index (the code generation path is different)\n pyfunc = tuple_index_static\n for typ in (types.UniTuple(types.int64, 4),\n types.Tuple((types.int64, types.int32, types.int64, types.int32))):\n cr = compile_isolated(pyfunc, (typ,))\n tup = (4, 3, 42, 6)\n self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup))\n\n typ = types.UniTuple(types.int64, 1)\n with self.assertTypingError():\n cr = compile_isolated(pyfunc, 
(typ,))\n\n # test unpack, staticgetitem with imprecise type (issue #3895)\n pyfunc = tuple_unpack_static_getitem_err\n with self.assertTypingError() as raises:\n cr = compile_isolated(pyfunc, ())\n msg = (\"Cannot infer the type of variable 'c', have imprecise type: \"\n \"list(undefined).\")\n self.assertIn(msg, str(raises.exception))\n\n\n def test_in(self):\n pyfunc = in_usecase\n cr = compile_isolated(pyfunc,\n [types.int64, types.UniTuple(types.int64, 3)])\n tup = (4, 1, 5)\n for i in range(5):\n self.assertPreciseEqual(cr.entry_point(i, tup), pyfunc(i, tup))\n\n def check_slice(self, pyfunc):\n tup = (4, 5, 6, 7)\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 4)])\n self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup))\n cr = compile_isolated(\n pyfunc,\n [types.Tuple((types.int64, types.int32, types.int64, types.int32))])\n self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup))\n\n def test_slice2(self):\n self.check_slice(tuple_slice2)\n\n def test_slice3(self):\n self.check_slice(tuple_slice3)\n\n def test_bool(self):\n pyfunc = bool_usecase\n cr = compile_isolated(pyfunc,\n [types.Tuple((types.int64, types.int32))])\n args = ((4, 5),)\n self.assertPreciseEqual(cr.entry_point(*args), pyfunc(*args))\n cr = compile_isolated(pyfunc,\n [types.UniTuple(types.int64, 3)])\n args = ((4, 5, 6),)\n self.assertPreciseEqual(cr.entry_point(*args), pyfunc(*args))\n cr = compile_isolated(pyfunc,\n [types.Tuple(())])\n self.assertPreciseEqual(cr.entry_point(()), pyfunc(()))\n\n def test_add(self):\n pyfunc = add_usecase\n samples = [(types.Tuple(()), ()),\n (types.UniTuple(types.int32, 0), ()),\n (types.UniTuple(types.int32, 1), (42,)),\n (types.Tuple((types.int64, types.float32)), (3, 4.5)),\n ]\n for (ta, a), (tb, b) in itertools.product(samples, samples):\n cr = compile_isolated(pyfunc, (ta, tb))\n expected = pyfunc(a, b)\n got = cr.entry_point(a, b)\n self.assertPreciseEqual(got, expected, msg=(ta, tb))\n\n def _test_compare(self, pyfunc):\n def eq(pyfunc, cfunc, args):\n self.assertIs(cfunc(*args), pyfunc(*args),\n \"mismatch for arguments %s\" % (args,))\n\n # Same-sized tuples\n argtypes = [types.Tuple((types.int64, types.float32)),\n types.UniTuple(types.int32, 2)]\n for ta, tb in itertools.product(argtypes, argtypes):\n cr = compile_isolated(pyfunc, (ta, tb))\n cfunc = cr.entry_point\n for args in [((4, 5), (4, 5)),\n ((4, 5), (4, 6)),\n ((4, 6), (4, 5)),\n ((4, 5), (5, 4))]:\n eq(pyfunc, cfunc, args)\n # Different-sized tuples\n argtypes = [types.Tuple((types.int64, types.float32)),\n types.UniTuple(types.int32, 3)]\n cr = compile_isolated(pyfunc, tuple(argtypes))\n cfunc = cr.entry_point\n for args in [((4, 5), (4, 5, 6)),\n ((4, 5), (4, 4, 6)),\n ((4, 5), (4, 6, 7))]:\n eq(pyfunc, cfunc, args)\n\n def test_eq(self):\n self._test_compare(eq_usecase)\n\n def test_ne(self):\n self._test_compare(ne_usecase)\n\n def test_gt(self):\n self._test_compare(gt_usecase)\n\n def test_ge(self):\n self._test_compare(ge_usecase)\n\n def test_lt(self):\n self._test_compare(lt_usecase)\n\n def test_le(self):\n self._test_compare(le_usecase)\n\n\nclass TestNamedTuple(TestCase, MemoryLeakMixin):\n\n def test_unpack(self):\n def check(p):\n for pyfunc in tuple_first, tuple_second:\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check(Rect(4, 5))\n # Heterogeneous\n check(Rect(4, 5.5))\n\n def test_len(self):\n def check(p):\n pyfunc = len_usecase\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), 
pyfunc(p))\n\n # Homogeneous\n check(Rect(4, 5))\n check(Point(4, 5, 6))\n # Heterogeneous\n check(Rect(4, 5.5))\n check(Point(4, 5.5, 6j))\n\n def test_index(self):\n pyfunc = tuple_index\n cfunc = jit(nopython=True)(pyfunc)\n\n p = Point(4, 5, 6)\n for i in range(len(p)):\n self.assertPreciseEqual(cfunc(p, i), pyfunc(p, i))\n\n # test uintp indexing (because, e.g., parfor generates unsigned prange)\n for i in range(len(p)):\n self.assertPreciseEqual(cfunc(p, types.uintp(i)), pyfunc(p, i))\n\n def test_bool(self):\n def check(p):\n pyfunc = bool_usecase\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check(Rect(4, 5))\n # Heterogeneous\n check(Rect(4, 5.5))\n check(Empty())\n\n def _test_compare(self, pyfunc):\n def eq(pyfunc, cfunc, args):\n self.assertIs(cfunc(*args), pyfunc(*args),\n \"mismatch for arguments %s\" % (args,))\n\n cfunc = jit(nopython=True)(pyfunc)\n\n # Same-sized named tuples\n for a, b in [((4, 5), (4, 5)),\n ((4, 5), (4, 6)),\n ((4, 6), (4, 5)),\n ((4, 5), (5, 4))]:\n eq(pyfunc, cfunc, (Rect(*a), Rect(*b)))\n\n # Different-sized named tuples\n for a, b in [((4, 5), (4, 5, 6)),\n ((4, 5), (4, 4, 6)),\n ((4, 5), (4, 6, 7))]:\n eq(pyfunc, cfunc, (Rect(*a), Point(*b)))\n\n def test_eq(self):\n self._test_compare(eq_usecase)\n\n def test_ne(self):\n self._test_compare(ne_usecase)\n\n def test_gt(self):\n self._test_compare(gt_usecase)\n\n def test_ge(self):\n self._test_compare(ge_usecase)\n\n def test_lt(self):\n self._test_compare(lt_usecase)\n\n def test_le(self):\n self._test_compare(le_usecase)\n\n def test_getattr(self):\n pyfunc = getattr_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for args in (4, 5, 6), (4, 5.5, 6j):\n p = Point(*args)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n def test_construct(self):\n def check(pyfunc):\n cfunc = jit(nopython=True)(pyfunc)\n for args in (4, 5, 6), (4, 5.5, 6j):\n expected = pyfunc(*args)\n got = cfunc(*args)\n self.assertIs(type(got), type(expected))\n self.assertPreciseEqual(got, expected)\n\n check(make_point)\n check(make_point_kws)\n\n def test_type(self):\n # Test the type() built-in on named tuples\n pyfunc = type_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n arg_tuples = [(4, 5, 6), (4, 5.5, 6j)]\n for tup_args, args in itertools.product(arg_tuples, arg_tuples):\n tup = Point(*tup_args)\n expected = pyfunc(tup, *args)\n got = cfunc(tup, *args)\n self.assertIs(type(got), type(expected))\n self.assertPreciseEqual(got, expected)\n\n def test_literal_unification(self):\n # Test for #3565.\n @jit(nopython=True)\n def Data1(value):\n return Rect(value, -321)\n\n @jit(nopython=True)\n def call(i, j):\n if j == 0:\n # In the error, `result` is typed to `Rect(int, LiteralInt)`\n # because of the `-321` literal. 
This doesn't match the\n # `result` type in the other branch.\n result = Data1(i)\n else:\n # `result` is typed to be `Rect(int, int)`\n result = Rect(i, j)\n return result\n\n r = call(123, 1321)\n self.assertEqual(r, Rect(width=123, height=1321))\n r = call(123, 0)\n self.assertEqual(r, Rect(width=123, height=-321))\n\n def test_string_literal_in_ctor(self):\n # Test for issue #3813\n\n @jit(nopython=True)\n def foo():\n return Rect(10, 'somestring')\n\n r = foo()\n self.assertEqual(r, Rect(width=10, height='somestring'))\n\n def test_dispatcher_mistreat(self):\n # Test for issue #5215 that mistreat namedtuple as tuples\n @jit(nopython=True)\n def foo(x):\n return x\n\n in1 = (1, 2, 3)\n out1 = foo(in1)\n self.assertEqual(in1, out1)\n\n in2 = Point(1, 2, 3)\n out2 = foo(in2)\n self.assertEqual(in2, out2)\n\n # Check the signatures\n self.assertEqual(len(foo.nopython_signatures), 2)\n self.assertEqual(foo.nopython_signatures[0].args[0], typeof(in1))\n self.assertEqual(foo.nopython_signatures[1].args[0], typeof(in2))\n\n # Differently named\n in3 = Point2(1, 2, 3)\n out3 = foo(in3)\n self.assertEqual(in3, out3)\n self.assertEqual(len(foo.nopython_signatures), 3)\n self.assertEqual(foo.nopython_signatures[2].args[0], typeof(in3))\n\n\nclass TestTupleNRT(TestCase, MemoryLeakMixin):\n def test_tuple_add(self):\n def pyfunc(x):\n a = np.arange(3)\n return (a,) + (x,)\n\n cfunc = jit(nopython=True)(pyfunc)\n x = 123\n expect_a, expect_x = pyfunc(x)\n got_a, got_x = cfunc(x)\n np.testing.assert_equal(got_a, expect_a)\n self.assertEqual(got_x, expect_x)\n\n\nclass TestNamedTupleNRT(TestCase, MemoryLeakMixin):\n\n def test_return(self):\n # Check returning a namedtuple with a list inside it\n pyfunc = make_point_nrt\n cfunc = jit(nopython=True)(pyfunc)\n\n for arg in (3, 0):\n expected = pyfunc(arg)\n got = cfunc(arg)\n self.assertIs(type(got), type(expected))\n self.assertPreciseEqual(got, expected)\n\n\nclass TestConversions(TestCase):\n \"\"\"\n Test implicit conversions between tuple types.\n \"\"\"\n\n def check_conversion(self, fromty, toty, val):\n pyfunc = identity\n cr = compile_isolated(pyfunc, (fromty,), toty)\n cfunc = cr.entry_point\n res = cfunc(val)\n self.assertEqual(res, val)\n\n def test_conversions(self):\n check = self.check_conversion\n fromty = types.UniTuple(types.int32, 2)\n check(fromty, types.UniTuple(types.float32, 2), (4, 5))\n check(fromty, types.Tuple((types.float32, types.int16)), (4, 5))\n aty = types.UniTuple(types.int32, 0)\n bty = types.Tuple(())\n check(aty, bty, ())\n check(bty, aty, ())\n\n with self.assertRaises(errors.TypingError) as raises:\n check(fromty, types.Tuple((types.float32,)), (4, 5))\n msg = \"No conversion from UniTuple(int32 x 2) to UniTuple(float32 x 1)\"\n self.assertIn(msg, str(raises.exception))\n\n\nclass TestMethods(TestCase):\n\n def test_index(self):\n pyfunc = index_method_usecase\n cfunc = jit(nopython=True)(pyfunc)\n self.assertEqual(cfunc((1, 2, 3), 2), 1)\n\n with self.assertRaises(ValueError) as raises:\n cfunc((1, 2, 3), 4)\n msg = 'tuple.index(x): x not in tuple'\n self.assertEqual(msg, str(raises.exception))\n\n\nclass TestTupleBuild(TestCase):\n\n def test_build_unpack(self):\n def check(p):\n # using eval here since Python 2 doesn't even support the syntax\n pyfunc = eval(\"lambda a: (1, *a)\")\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check((4, 5))\n # Heterogeneous\n check((4, 5.5))\n\n def test_build_unpack_more(self):\n def check(p):\n # using eval here since 
Python 2 doesn't even support the syntax\n pyfunc = eval(\"lambda a: (1, *a, (1, 2), *a)\")\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check((4, 5))\n # Heterogeneous\n check((4, 5.5))\n\n def test_build_unpack_call(self):\n def check(p):\n # using eval here since Python 2 doesn't even support the syntax\n @jit\n def inner(*args):\n return args\n pyfunc = eval(\"lambda a: inner(1, *a)\", locals())\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check((4, 5))\n # Heterogeneous\n check((4, 5.5))\n\n @unittest.skipIf(utils.PYVERSION < (3, 6), \"needs Python 3.6+\")\n def test_build_unpack_call_more(self):\n def check(p):\n # using eval here since Python 2 doesn't even support the syntax\n @jit\n def inner(*args):\n return args\n pyfunc = eval(\"lambda a: inner(1, *a, *(1, 2), *a)\", locals())\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(p), pyfunc(p))\n\n # Homogeneous\n check((4, 5))\n # Heterogeneous\n check((4, 5.5))\n\n def test_tuple_constructor(self):\n def check(pyfunc, arg):\n cfunc = jit(nopython=True)(pyfunc)\n self.assertPreciseEqual(cfunc(arg), pyfunc(arg))\n\n # empty\n check(lambda _: tuple(), ())\n # Homogeneous\n check(lambda a: tuple(a), (4, 5))\n # Heterogeneous\n check(lambda a: tuple(a), (4, 5.5))\n\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import numpy as np\nfrom numba import cuda, float32, int32\nfrom numba.core.errors import TypingError\nfrom numba.cuda.testing import unittest, CUDATestCase\nfrom numba.cuda.testing import skip_on_cudasim\n\nGLOBAL_CONSTANT = 5\nGLOBAL_CONSTANT_2 = 6\nGLOBAL_CONSTANT_TUPLE = 5, 6\n\n\ndef udt_global_constants(A):\n sa = cuda.shared.array(shape=GLOBAL_CONSTANT, dtype=float32)\n i = cuda.grid(1)\n A[i] = sa[i]\n\n\ndef udt_global_build_tuple(A):\n sa = cuda.shared.array(shape=(GLOBAL_CONSTANT, GLOBAL_CONSTANT_2),\n dtype=float32)\n i, j = cuda.grid(2)\n A[i, j] = sa[i, j]\n\n\ndef udt_global_build_list(A):\n sa = cuda.shared.array(shape=[GLOBAL_CONSTANT, GLOBAL_CONSTANT_2],\n dtype=float32)\n i, j = cuda.grid(2)\n A[i, j] = sa[i, j]\n\n\ndef udt_global_constant_tuple(A):\n sa = cuda.shared.array(shape=GLOBAL_CONSTANT_TUPLE, dtype=float32)\n i, j = cuda.grid(2)\n A[i, j] = sa[i, j]\n\n\ndef udt_invalid_1(A):\n sa = cuda.shared.array(shape=A[0], dtype=float32)\n i = cuda.grid(1)\n A[i] = sa[i]\n\n\ndef udt_invalid_2(A):\n sa = cuda.shared.array(shape=(1, A[0]), dtype=float32)\n i, j = cuda.grid(2)\n A[i, j] = sa[i, j]\n\n\ndef udt_invalid_3(A):\n sa = cuda.shared.array(shape=(1, A[0]), dtype=float32)\n i = cuda.grid(1)\n A[i] = sa[i, 0]\n\n\nclass TestSharedMemoryCreation(CUDATestCase):\n def getarg(self):\n return np.array(100, dtype=np.float32, ndmin=1)\n\n def getarg2(self):\n return self.getarg().reshape(1,1)\n\n def test_global_constants(self):\n udt = cuda.jit((float32[:],))(udt_global_constants)\n udt[1, 1](self.getarg())\n\n def test_global_build_tuple(self):\n udt = cuda.jit((float32[:, :],))(udt_global_build_tuple)\n udt[1, 1](self.getarg2())\n\n @skip_on_cudasim('Simulator does not perform macro expansion')\n def test_global_build_list(self):\n with self.assertRaises(TypingError) as raises:\n cuda.jit((float32[:, :],))(udt_global_build_list)\n\n self.assertIn(\"No implementation of function \"\n \"Function(<function shared.array\",\n str(raises.exception))\n self.assertIn(\"found for signature:\\n \\n \"\n \">>> array(shape=list(int64), dtype=class(float32)\",\n str(raises.exception))\n\n def test_global_constant_tuple(self):\n udt = cuda.jit((float32[:, :],))(udt_global_constant_tuple)\n udt[1, 1](self.getarg2())\n\n @skip_on_cudasim(\"Can't check for constants in simulator\")\n def test_invalid_1(self):\n # Scalar shape cannot be a floating point value\n with self.assertRaises(TypingError) as raises:\n cuda.jit((float32[:],))(udt_invalid_1)\n\n self.assertIn(\"No implementation of function \"\n \"Function(<function shared.array\",\n str(raises.exception))\n self.assertIn(\"found for signature:\\n \\n \"\n \">>> array(shape=float32, dtype=class(float32))\",\n str(raises.exception))\n\n @skip_on_cudasim(\"Can't check for constants in simulator\")\n def test_invalid_2(self):\n # Tuple shape cannot contain a floating point value\n with self.assertRaises(TypingError) as raises:\n cuda.jit((float32[:, :],))(udt_invalid_2)\n\n self.assertIn(\"No implementation of function \"\n \"Function(<function shared.array\",\n str(raises.exception))\n self.assertIn(\"found for signature:\\n \\n \"\n \">>> array(shape=Tuple(Literal[int](1), \"\n \"array(float32, 1d, A)), dtype=class(float32))\",\n str(raises.exception))\n\n @skip_on_cudasim(\"Can't check for constants in simulator\")\n def test_invalid_3(self):\n # Scalar shape must be literal\n with self.assertRaises(TypingError) as raises:\n cuda.jit((int32[:],))(udt_invalid_1)\n\n self.assertIn(\"No implementation of function \"\n 
\"Function(<function shared.array\",\n str(raises.exception))\n self.assertIn(\"found for signature:\\n \\n \"\n \">>> array(shape=int32, dtype=class(float32))\",\n str(raises.exception))\n\n @skip_on_cudasim(\"Can't check for constants in simulator\")\n def test_invalid_4(self):\n # Tuple shape must contain only literals\n with self.assertRaises(TypingError) as raises:\n cuda.jit((int32[:],))(udt_invalid_3)\n\n self.assertIn(\"No implementation of function \"\n \"Function(<function shared.array\",\n str(raises.exception))\n self.assertIn(\"found for signature:\\n \\n \"\n \">>> array(shape=Tuple(Literal[int](1), int32), \"\n \"dtype=class(float32))\",\n str(raises.exception))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.arange",
"numpy.zeros",
"numpy.testing.assert_equal"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
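Note: the test file above exercises numba's nopython-mode support for tuples and namedtuples: passing them in, returning them, unpacking, indexing, and constructing them inside jitted code. Below is a minimal standalone sketch of that pattern, assuming only that numba is installed; the `Rect` type and both functions are illustrative, not taken from the tests.

    import collections
    from numba import njit

    Rect = collections.namedtuple('Rect', ('width', 'height'))

    @njit
    def area(rect):
        w, h = rect  # tuple unpacking works in nopython mode
        return w * h

    @njit
    def scale(rect, k):
        # namedtuples can also be constructed inside jitted code,
        # as make_point in the tests above demonstrates
        return Rect(rect.width * k, rect.height * k)

    assert area(Rect(4, 5)) == 20
    assert scale(Rect(4, 5), 2) == Rect(width=8, height=10)

As test_construct above asserts, the jitted function returns a genuine Rect instance, so equality against a Python-side namedtuple holds.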
fintzd/rasa | [
"6359be5509c7d87cd29c2ab5149bc45e843fea85",
"6359be5509c7d87cd29c2ab5149bc45e843fea85"
] | [
"rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py",
"rasa/core/featurizers/single_state_featurizer.py"
] | [
"from __future__ import annotations\nimport numpy as np\nimport logging\n\nfrom typing import Any, Text, List, Dict, Tuple, Type\n\nfrom rasa.engine.graph import ExecutionContext, GraphComponent\nfrom rasa.engine.recipes.default_recipe import DefaultV1Recipe\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.nlu.featurizers.dense_featurizer.dense_featurizer import DenseFeaturizer\nfrom rasa.nlu.tokenizers.tokenizer import Token, Tokenizer\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.nlu.constants import (\n DENSE_FEATURIZABLE_ATTRIBUTES,\n SEQUENCE_FEATURES,\n SENTENCE_FEATURES,\n NO_LENGTH_RESTRICTION,\n NUMBER_OF_SUB_TOKENS,\n TOKENS_NAMES,\n)\nfrom rasa.shared.nlu.constants import (\n TEXT,\n ACTION_TEXT,\n)\nfrom rasa.utils import train_utils\n\nlogger = logging.getLogger(__name__)\n\nMAX_SEQUENCE_LENGTHS = {\n \"bert\": 512,\n \"gpt\": 512,\n \"gpt2\": 512,\n \"xlnet\": NO_LENGTH_RESTRICTION,\n \"distilbert\": 512,\n \"roberta\": 512,\n}\n\n\[email protected](\n DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=False\n)\nclass LanguageModelFeaturizer(DenseFeaturizer, GraphComponent):\n \"\"\"A featurizer that uses transformer-based language models.\n\n This component loads a pre-trained language model\n from the Transformers library (https://github.com/huggingface/transformers)\n including BERT, GPT, GPT-2, xlnet, distilbert, and roberta.\n It also tokenizes and featurizes the featurizable dense attributes of\n each message.\n \"\"\"\n\n @classmethod\n def required_components(cls) -> List[Type]:\n \"\"\"Components that should be included in the pipeline before this component.\"\"\"\n return [Tokenizer]\n\n def __init__(\n self, config: Dict[Text, Any], execution_context: ExecutionContext,\n ) -> None:\n \"\"\"Initializes the featurizer with the model in the config.\"\"\"\n super(LanguageModelFeaturizer, self).__init__(\n execution_context.node_name, config\n )\n self._load_model_metadata()\n self._load_model_instance()\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns LanguageModelFeaturizer's default config.\"\"\"\n return {\n **DenseFeaturizer.get_default_config(),\n # name of the language model to load.\n \"model_name\": \"bert\",\n # Pre-Trained weights to be loaded(string)\n \"model_weights\": None,\n # an optional path to a specific directory to download\n # and cache the pre-trained model weights.\n \"cache_dir\": None,\n }\n\n @classmethod\n def validate_config(cls, config: Dict[Text, Any]) -> None:\n \"\"\"Validates the configuration.\"\"\"\n pass\n\n @classmethod\n def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> LanguageModelFeaturizer:\n \"\"\"Creates a LanguageModelFeaturizer.\n\n Loads the model specified in the config.\n \"\"\"\n return cls(config, execution_context)\n\n @staticmethod\n def required_packages() -> List[Text]:\n \"\"\"Returns the extra python dependencies required.\"\"\"\n return [\"transformers\"]\n\n def _load_model_metadata(self) -> None:\n \"\"\"Loads the metadata for the specified model and set them as properties.\n\n This includes the model name, model weights, cache directory and the\n maximum sequence length the model can handle.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_weights_defaults,\n )\n\n 
self.model_name = self._config[\"model_name\"]\n\n if self.model_name not in model_class_dict:\n raise KeyError(\n f\"'{self.model_name}' not a valid model name. Choose from \"\n f\"{str(list(model_class_dict.keys()))} or create\"\n f\"a new class inheriting from this class to support your model.\"\n )\n\n self.model_weights = self._config[\"model_weights\"]\n self.cache_dir = self._config[\"cache_dir\"]\n\n if not self.model_weights:\n logger.info(\n f\"Model weights not specified. Will choose default model \"\n f\"weights: {model_weights_defaults[self.model_name]}\"\n )\n self.model_weights = model_weights_defaults[self.model_name]\n\n self.max_model_sequence_length = MAX_SEQUENCE_LENGTHS[self.model_name]\n\n def _load_model_instance(self) -> None:\n \"\"\"Tries to load the model instance.\n\n Model loading should be skipped in unit tests.\n See unit tests for examples.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_tokenizer_dict,\n )\n\n logger.debug(f\"Loading Tokenizer and Model for {self.model_name}\")\n\n self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n self.model = model_class_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n\n # Use a universal pad token since all transformer architectures do not have a\n # consistent token. Instead of pad_token_id we use unk_token_id because\n # pad_token_id is not set for all architectures. We can't add a new token as\n # well since vocabulary resizing is not yet supported for TF classes.\n # Also, this does not hurt the model predictions since we use an attention mask\n # while feeding input.\n self.pad_token_id = self.tokenizer.unk_token_id\n\n def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:\n \"\"\"Passes the text through the tokenizer of the language model.\n\n Args:\n text: Text to be tokenized.\n\n Returns: List of token ids and token strings.\n \"\"\"\n split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)\n\n split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)\n\n return split_token_ids, split_token_strings\n\n def _add_lm_specific_special_tokens(\n self, token_ids: List[List[int]]\n ) -> List[List[int]]:\n \"\"\"Adds the language and model-specific tokens used during training.\n\n Args:\n token_ids: List of token ids for each example in the batch.\n\n Returns: Augmented list of token ids for each example in the batch.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_special_tokens_pre_processors,\n )\n\n augmented_tokens = [\n model_special_tokens_pre_processors[self.model_name](example_token_ids)\n for example_token_ids in token_ids\n ]\n return augmented_tokens\n\n def _lm_specific_token_cleanup(\n self, split_token_ids: List[int], token_strings: List[Text]\n ) -> Tuple[List[int], List[Text]]:\n \"\"\"Cleans up special chars added by tokenizers of language models.\n\n Many language models add a special char in front/back of (some) words. 
We clean\n up those chars as they are not\n needed once the features are already computed.\n\n Args:\n split_token_ids: List of token ids received as output from the language\n model specific tokenizer.\n token_strings: List of token strings received as output from the language\n model specific tokenizer.\n\n Returns: Cleaned up token ids and token strings.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners\n\n return model_tokens_cleaners[self.model_name](split_token_ids, token_strings)\n\n def _post_process_sequence_embeddings(\n self, sequence_embeddings: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Computes sentence and sequence level representations for relevant tokens.\n\n Args:\n sequence_embeddings: Sequence level dense features received as output from\n language model.\n\n Returns: Sentence and sequence level representations.\n \"\"\"\n from rasa.nlu.utils.hugging_face.registry import (\n model_embeddings_post_processors,\n )\n\n sentence_embeddings = []\n post_processed_sequence_embeddings = []\n\n for example_embedding in sequence_embeddings:\n (\n example_sentence_embedding,\n example_post_processed_embedding,\n ) = model_embeddings_post_processors[self.model_name](example_embedding)\n\n sentence_embeddings.append(example_sentence_embedding)\n post_processed_sequence_embeddings.append(example_post_processed_embedding)\n\n return (\n np.array(sentence_embeddings),\n np.array(post_processed_sequence_embeddings),\n )\n\n def _tokenize_example(\n self, message: Message, attribute: Text\n ) -> Tuple[List[Token], List[int]]:\n \"\"\"Tokenizes a single message example.\n\n Many language models add a special char in front of (some) words and split\n words into sub-words. To ensure the entity start and end values matches the\n token values, use the tokens produced by the Tokenizer component. 
If\n individual tokens are split up into multiple tokens, we add this information\n to the respected token.\n\n Args:\n message: Single message object to be processed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for the corresponding\n attribute of the message.\n \"\"\"\n tokens_in = message.get(TOKENS_NAMES[attribute])\n tokens_out = []\n\n token_ids_out = []\n\n for token in tokens_in:\n # use lm specific tokenizer to further tokenize the text\n split_token_ids, split_token_strings = self._lm_tokenize(token.text)\n\n if not split_token_ids:\n # fix the situation that `token.text` only contains whitespace or other\n # special characters, which cause `split_token_ids` and\n # `split_token_strings` be empty, finally cause\n # `self._lm_specific_token_cleanup()` to raise an exception\n continue\n\n (split_token_ids, split_token_strings) = self._lm_specific_token_cleanup(\n split_token_ids, split_token_strings\n )\n\n token_ids_out += split_token_ids\n\n token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings))\n\n tokens_out.append(token)\n\n return tokens_out, token_ids_out\n\n def _get_token_ids_for_batch(\n self, batch_examples: List[Message], attribute: Text\n ) -> Tuple[List[List[Token]], List[List[int]]]:\n \"\"\"Computes token ids and token strings for each example in batch.\n\n A token id is the id of that token in the vocabulary of the language model.\n\n Args:\n batch_examples: Batch of message objects for which tokens need to be\n computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n\n Returns: List of token strings and token ids for each example in the batch.\n \"\"\"\n batch_token_ids = []\n batch_tokens = []\n for example in batch_examples:\n\n example_tokens, example_token_ids = self._tokenize_example(\n example, attribute\n )\n batch_tokens.append(example_tokens)\n batch_token_ids.append(example_token_ids)\n\n return batch_tokens, batch_token_ids\n\n @staticmethod\n def _compute_attention_mask(\n actual_sequence_lengths: List[int], max_input_sequence_length: int\n ) -> np.ndarray:\n \"\"\"Computes a mask for padding tokens.\n\n This mask will be used by the language model so that it does not attend to\n padding tokens.\n\n Args:\n actual_sequence_lengths: List of length of each example without any\n padding.\n max_input_sequence_length: Maximum length of a sequence that will be\n present in the input batch. This is\n after taking into consideration the maximum input sequence the model\n can handle. 
Hence it can never be\n greater than self.max_model_sequence_length in case the model\n applies length restriction.\n\n Returns: Computed attention mask, 0 for padding and 1 for non-padding\n tokens.\n \"\"\"\n attention_mask = []\n\n for actual_sequence_length in actual_sequence_lengths:\n # add 1s for present tokens, fill up the remaining space up to max\n # sequence length with 0s (non-existing tokens)\n padded_sequence = [1] * min(\n actual_sequence_length, max_input_sequence_length\n ) + [0] * (\n max_input_sequence_length\n - min(actual_sequence_length, max_input_sequence_length)\n )\n attention_mask.append(padded_sequence)\n\n attention_mask = np.array(attention_mask).astype(np.float32)\n return attention_mask\n\n def _extract_sequence_lengths(\n self, batch_token_ids: List[List[int]]\n ) -> Tuple[List[int], int]:\n \"\"\"Extracts the sequence length for each example and maximum sequence length.\n\n Args:\n batch_token_ids: List of token ids for each example in the batch.\n\n Returns:\n Tuple consisting of: the actual sequence lengths for each example,\n and the maximum input sequence length (taking into account the\n maximum sequence length that the model can handle.\n \"\"\"\n # Compute max length across examples\n max_input_sequence_length = 0\n actual_sequence_lengths = []\n\n for example_token_ids in batch_token_ids:\n sequence_length = len(example_token_ids)\n actual_sequence_lengths.append(sequence_length)\n max_input_sequence_length = max(\n max_input_sequence_length, len(example_token_ids)\n )\n\n # Take into account the maximum sequence length the model can handle\n max_input_sequence_length = (\n max_input_sequence_length\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION\n else min(max_input_sequence_length, self.max_model_sequence_length)\n )\n\n return actual_sequence_lengths, max_input_sequence_length\n\n def _add_padding_to_batch(\n self, batch_token_ids: List[List[int]], max_sequence_length_model: int\n ) -> List[List[int]]:\n \"\"\"Adds padding so that all examples in the batch are of the same length.\n\n Args:\n batch_token_ids: Batch of examples where each example is a non-padded list\n of token ids.\n max_sequence_length_model: Maximum length of any input sequence in the batch\n to be fed to the model.\n\n Returns:\n Padded batch with all examples of the same length.\n \"\"\"\n padded_token_ids = []\n\n # Add padding according to max_sequence_length\n # Some models don't contain pad token, we use unknown token as padding token.\n # This doesn't affect the computation since we compute an attention mask\n # anyways.\n for example_token_ids in batch_token_ids:\n\n # Truncate any longer sequences so that they can be fed to the model\n if len(example_token_ids) > max_sequence_length_model:\n example_token_ids = example_token_ids[:max_sequence_length_model]\n\n padded_token_ids.append(\n example_token_ids\n + [self.pad_token_id]\n * (max_sequence_length_model - len(example_token_ids))\n )\n return padded_token_ids\n\n @staticmethod\n def _extract_nonpadded_embeddings(\n embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Extracts embeddings for actual tokens.\n\n Use pre-computed non-padded lengths of each example to extract embeddings\n for non-padding tokens.\n\n Args:\n embeddings: sequence level representations for each example of the batch.\n actual_sequence_lengths: non-padded lengths of each example of the batch.\n\n Returns:\n Sequence level embeddings for only non-padding tokens of the batch.\n \"\"\"\n 
nonpadded_sequence_embeddings = []\n for index, embedding in enumerate(embeddings):\n unmasked_embedding = embedding[: actual_sequence_lengths[index]]\n nonpadded_sequence_embeddings.append(unmasked_embedding)\n\n return np.array(nonpadded_sequence_embeddings)\n\n def _compute_batch_sequence_features(\n self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]\n ) -> np.ndarray:\n \"\"\"Feeds the padded batch to the language model.\n\n Args:\n batch_attention_mask: Mask of 0s and 1s which indicate whether the token\n is a padding token or not.\n padded_token_ids: Batch of token ids for each example. The batch is padded\n and hence can be fed at once.\n\n Returns:\n Sequence level representations from the language model.\n \"\"\"\n model_outputs = self.model(\n np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)\n )\n\n # sequence hidden states is always the first output from all models\n sequence_hidden_states = model_outputs[0]\n\n sequence_hidden_states = sequence_hidden_states.numpy()\n return sequence_hidden_states\n\n def _validate_sequence_lengths(\n self,\n actual_sequence_lengths: List[int],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> None:\n \"\"\"Validates sequence length.\n\n Checks if sequence lengths of inputs are less than\n the max sequence length the model can handle.\n\n This method should throw an error during training, and log a debug\n message during inference if any of the input examples have a length\n greater than maximum sequence length allowed.\n\n Args:\n actual_sequence_lengths: original sequence length of all inputs\n batch_examples: all message instances in the batch\n attribute: attribute of message object to be processed\n inference_mode: whether this is during training or inference\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # There is no restriction on sequence length from the model\n return\n\n for sequence_length, example in zip(actual_sequence_lengths, batch_examples):\n if sequence_length > self.max_model_sequence_length:\n if not inference_mode:\n raise RuntimeError(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. \"\n f\"Either shorten the message or use a model which has no \"\n f\"restriction on input sequence length like XLNet.\"\n )\n logger.debug(\n f\"The sequence length of '{example.get(attribute)[:20]}...' \"\n f\"is too long({sequence_length} tokens) for the \"\n f\"model chosen {self.model_name} which has a maximum \"\n f\"sequence length of {self.max_model_sequence_length} tokens. 
\"\n f\"Downstream model predictions may be affected because of this.\"\n )\n\n def _add_extra_padding(\n self, sequence_embeddings: np.ndarray, actual_sequence_lengths: List[int]\n ) -> np.ndarray:\n \"\"\"Adds extra zero padding to match the original sequence length.\n\n This is only done if the input was truncated during the batch\n preparation of input for the model.\n Args:\n sequence_embeddings: Embeddings returned from the model\n actual_sequence_lengths: original sequence length of all inputs\n\n Returns:\n Modified sequence embeddings with padding if necessary\n \"\"\"\n if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:\n # No extra padding needed because there wouldn't have been any\n # truncation in the first place\n return sequence_embeddings\n\n reshaped_sequence_embeddings = []\n for index, embedding in enumerate(sequence_embeddings):\n embedding_size = embedding.shape[-1]\n if actual_sequence_lengths[index] > self.max_model_sequence_length:\n embedding = np.concatenate(\n [\n embedding,\n np.zeros(\n (\n actual_sequence_lengths[index]\n - self.max_model_sequence_length,\n embedding_size,\n ),\n dtype=np.float32,\n ),\n ]\n )\n reshaped_sequence_embeddings.append(embedding)\n\n return np.array(reshaped_sequence_embeddings)\n\n def _get_model_features_for_batch(\n self,\n batch_token_ids: List[List[int]],\n batch_tokens: List[List[Token]],\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Computes dense features of each example in the batch.\n\n We first add the special tokens corresponding to each language model. Next, we\n add appropriate padding and compute a mask for that padding so that it doesn't\n affect the feature computation. The padded batch is next fed to the language\n model and token level embeddings are computed. 
Using the pre-computed mask,\n embeddings for non-padding tokens are extracted and subsequently sentence\n level embeddings are computed.\n\n Args:\n batch_token_ids: List of token ids of each example in the batch.\n batch_tokens: List of token objects for each example in the batch.\n batch_examples: List of examples in the batch.\n attribute: attribute of the Message object to be processed.\n inference_mode: Whether the call is during training or during inference.\n\n Returns:\n Sentence and token level dense representations.\n \"\"\"\n # Let's first add tokenizer specific special tokens to all examples\n batch_token_ids_augmented = self._add_lm_specific_special_tokens(\n batch_token_ids\n )\n\n # Compute sequence lengths for all examples\n (\n actual_sequence_lengths,\n max_input_sequence_length,\n ) = self._extract_sequence_lengths(batch_token_ids_augmented)\n\n # Validate that all sequences can be processed based on their sequence\n # lengths and the maximum sequence length the model can handle\n self._validate_sequence_lengths(\n actual_sequence_lengths, batch_examples, attribute, inference_mode\n )\n\n # Add padding so that whole batch can be fed to the model\n padded_token_ids = self._add_padding_to_batch(\n batch_token_ids_augmented, max_input_sequence_length\n )\n\n # Compute attention mask based on actual_sequence_length\n batch_attention_mask = self._compute_attention_mask(\n actual_sequence_lengths, max_input_sequence_length\n )\n\n # Get token level features from the model\n sequence_hidden_states = self._compute_batch_sequence_features(\n batch_attention_mask, padded_token_ids\n )\n\n # Extract features for only non-padding tokens\n sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(\n sequence_hidden_states, actual_sequence_lengths\n )\n\n # Extract sentence level and post-processed features\n (\n sentence_embeddings,\n sequence_embeddings,\n ) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)\n\n # Pad zeros for examples which were truncated in inference mode.\n # This is intentionally done after sentence embeddings have been\n # extracted so that they are not affected\n sequence_embeddings = self._add_extra_padding(\n sequence_embeddings, actual_sequence_lengths\n )\n\n # shape of matrix for all sequence embeddings\n batch_dim = len(sequence_embeddings)\n seq_dim = max(e.shape[0] for e in sequence_embeddings)\n feature_dim = sequence_embeddings[0].shape[1]\n shape = (batch_dim, seq_dim, feature_dim)\n\n # align features with tokens so that we have just one vector per token\n # (don't include sub-tokens)\n sequence_embeddings = train_utils.align_token_features(\n batch_tokens, sequence_embeddings, shape\n )\n\n # sequence_embeddings is a padded numpy array\n # remove the padding, keep just the non-zero vectors\n sequence_final_embeddings = []\n for embeddings, tokens in zip(sequence_embeddings, batch_tokens):\n sequence_final_embeddings.append(embeddings[: len(tokens)])\n sequence_final_embeddings = np.array(sequence_final_embeddings)\n\n return sentence_embeddings, sequence_final_embeddings\n\n def _get_docs_for_batch(\n self,\n batch_examples: List[Message],\n attribute: Text,\n inference_mode: bool = False,\n ) -> List[Dict[Text, Any]]:\n \"\"\"Computes language model docs for all examples in the batch.\n\n Args:\n batch_examples: Batch of message objects for which language model docs\n need to be computed.\n attribute: Property of message to be processed, one of ``TEXT`` or\n ``RESPONSE``.\n inference_mode: Whether the call is 
during inference or during training.\n\n\n Returns:\n List of language model docs for each message in batch.\n \"\"\"\n batch_tokens, batch_token_ids = self._get_token_ids_for_batch(\n batch_examples, attribute\n )\n\n (\n batch_sentence_features,\n batch_sequence_features,\n ) = self._get_model_features_for_batch(\n batch_token_ids, batch_tokens, batch_examples, attribute, inference_mode\n )\n\n # A doc consists of\n # {'sequence_features': ..., 'sentence_features': ...}\n batch_docs = []\n for index in range(len(batch_examples)):\n doc = {\n SEQUENCE_FEATURES: batch_sequence_features[index],\n SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),\n }\n batch_docs.append(doc)\n\n return batch_docs\n\n def process_training_data(self, training_data: TrainingData,) -> TrainingData:\n \"\"\"Computes tokens and dense features for each message in training data.\n\n Args:\n training_data: NLU training data to be tokenized and featurized\n config: NLU pipeline config consisting of all components.\n \"\"\"\n batch_size = 64\n\n for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:\n\n non_empty_examples = list(\n filter(lambda x: x.get(attribute), training_data.training_examples)\n )\n\n batch_start_index = 0\n\n while batch_start_index < len(non_empty_examples):\n\n batch_end_index = min(\n batch_start_index + batch_size, len(non_empty_examples)\n )\n # Collect batch examples\n batch_messages = non_empty_examples[batch_start_index:batch_end_index]\n\n # Construct a doc with relevant features\n # extracted(tokens, dense_features)\n batch_docs = self._get_docs_for_batch(batch_messages, attribute)\n\n for index, ex in enumerate(batch_messages):\n self._set_lm_features(batch_docs[index], ex, attribute)\n batch_start_index += batch_size\n\n return training_data\n\n def process(self, messages: List[Message]) -> List[Message]:\n \"\"\"Processes messages by computing tokens and dense features.\"\"\"\n for message in messages:\n self._process_message(message)\n return messages\n\n def _process_message(self, message: Message) -> Message:\n \"\"\"Processes a message by computing tokens and dense features.\"\"\"\n # processing featurizers operates only on TEXT and ACTION_TEXT attributes,\n # because all other attributes are labels which are featurized during\n # training and their features are stored by the model itself.\n for attribute in {TEXT, ACTION_TEXT}:\n if message.get(attribute):\n self._set_lm_features(\n self._get_docs_for_batch(\n [message], attribute=attribute, inference_mode=True\n )[0],\n message,\n attribute,\n )\n return message\n\n def _set_lm_features(\n self, doc: Dict[Text, Any], message: Message, attribute: Text = TEXT\n ) -> None:\n \"\"\"Adds the precomputed word vectors to the messages features.\"\"\"\n sequence_features = doc[SEQUENCE_FEATURES]\n sentence_features = doc[SENTENCE_FEATURES]\n\n self.add_features_to_message(\n sequence=sequence_features,\n sentence=sentence_features,\n attribute=attribute,\n message=message,\n )\n",
"import logging\nimport numpy as np\nimport scipy.sparse\nfrom typing import List, Optional, Dict, Text, Set, Any\n\nfrom rasa.core.featurizers.precomputation import MessageContainerForCoreFeaturization\nfrom rasa.nlu.extractors.extractor import EntityTagSpec\nfrom rasa.nlu.utils import bilou_utils\nfrom rasa.nlu.utils.bilou_utils import BILOU_PREFIXES\nfrom rasa.shared.core.domain import SubState, State, Domain\nfrom rasa.shared.core.constants import PREVIOUS_ACTION, ACTIVE_LOOP, USER, SLOTS\nfrom rasa.shared.core.trackers import is_prev_action_listen_in_state\nfrom rasa.shared.nlu.constants import (\n ENTITIES,\n FEATURE_TYPE_SENTENCE,\n ACTION_TEXT,\n ACTION_NAME,\n INTENT,\n NO_ENTITY_TAG,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_TAGS,\n TEXT,\n)\nfrom rasa.shared.nlu.training_data.features import Features\nfrom rasa.utils.tensorflow import model_data_utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass SingleStateFeaturizer:\n \"\"\"Base class to transform the dialogue state into an ML format.\n\n Subclasses of SingleStateFeaturizer will decide how a bot will\n transform the dialogue state into a dictionary mapping an attribute\n to its features. Possible attributes are: `INTENT`, `TEXT`, `ACTION_NAME`,\n `ACTION_TEXT`, `ENTITIES`, `SLOTS` and `ACTIVE_LOOP`. Each attribute will be\n featurized into a list of `rasa.utils.features.Features`.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the single state featurizer.\"\"\"\n self._default_feature_states = {}\n self.action_texts = []\n self.entity_tag_specs = []\n\n def _create_entity_tag_specs(\n self, bilou_tagging: bool = False\n ) -> List[EntityTagSpec]:\n \"\"\"Returns the tag to index mapping for entities.\n\n Returns:\n Tag to index mapping.\n \"\"\"\n if ENTITIES not in self._default_feature_states:\n return []\n\n if bilou_tagging:\n tag_id_index_mapping = {\n f\"{prefix}{tag}\": idx_1 * len(BILOU_PREFIXES) + idx_2 + 1\n for tag, idx_1 in self._default_feature_states[ENTITIES].items()\n for idx_2, prefix in enumerate(BILOU_PREFIXES)\n }\n else:\n tag_id_index_mapping = {\n tag: idx + 1 # +1 to keep 0 for the NO_ENTITY_TAG\n for tag, idx in self._default_feature_states[ENTITIES].items()\n }\n\n # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index\n # needed for correct prediction for padding\n tag_id_index_mapping[NO_ENTITY_TAG] = 0\n\n # TODO\n # The entity states used to create the tag-idx-mapping contains the\n # entities and the concatenated entity and roles/groups. 
We do not\n # distinguish between entities and roles/groups right now.\n # we return a list to anticipate that\n return [\n EntityTagSpec(\n tag_name=ENTITY_ATTRIBUTE_TYPE,\n tags_to_ids=tag_id_index_mapping,\n ids_to_tags={value: key for key, value in tag_id_index_mapping.items()},\n num_tags=len(tag_id_index_mapping),\n )\n ]\n\n def prepare_for_training(\n self, domain: Domain, bilou_tagging: bool = False,\n ) -> None:\n \"\"\"Gets necessary information for featurization from domain.\n\n Args:\n domain: An instance of :class:`rasa.shared.core.domain.Domain`.\n bilou_tagging: indicates whether BILOU tagging should be used or not\n \"\"\"\n # store feature states for each attribute in order to create binary features\n def convert_to_dict(feature_states: List[Text]) -> Dict[Text, int]:\n return {\n feature_state: idx for idx, feature_state in enumerate(feature_states)\n }\n\n self._default_feature_states[INTENT] = convert_to_dict(domain.intents)\n self._default_feature_states[ACTION_NAME] = convert_to_dict(\n domain.action_names_or_texts\n )\n self._default_feature_states[ENTITIES] = convert_to_dict(domain.entity_states)\n self._default_feature_states[SLOTS] = convert_to_dict(domain.slot_states)\n self._default_feature_states[ACTIVE_LOOP] = convert_to_dict(domain.form_names)\n self.action_texts = domain.action_texts\n self.entity_tag_specs = self._create_entity_tag_specs(bilou_tagging)\n\n def _state_features_for_attribute(\n self, sub_state: SubState, attribute: Text\n ) -> Dict[Text, int]:\n # FIXME: the code below is not type-safe, but fixing it\n # would require more refactoring, for instance using\n # data classes in our states\n if attribute in {INTENT, ACTION_NAME}:\n return {sub_state[attribute]: 1} # type: ignore[dict-item]\n elif attribute == ENTITIES:\n return {entity: 1 for entity in sub_state.get(ENTITIES, [])}\n elif attribute == ACTIVE_LOOP:\n return {sub_state[\"name\"]: 1} # type: ignore[dict-item]\n elif attribute == SLOTS:\n return {\n f\"{slot_name}_{i}\": value\n for slot_name, slot_as_feature in sub_state.items()\n for i, value in enumerate(slot_as_feature)\n }\n else:\n raise ValueError(\n f\"Given attribute '{attribute}' is not supported. 
\"\n f\"It must be one of '{self._default_feature_states.keys()}'.\"\n )\n\n def _create_features(\n self, sub_state: SubState, attribute: Text, sparse: bool = False\n ) -> List[Features]:\n state_features = self._state_features_for_attribute(sub_state, attribute)\n\n features = np.zeros(len(self._default_feature_states[attribute]), np.float32)\n for state_feature, value in state_features.items():\n # check that the value is in default_feature_states to be able to assign\n # its value\n if state_feature in self._default_feature_states[attribute]:\n features[self._default_feature_states[attribute][state_feature]] = value\n features = np.expand_dims(features, 0)\n\n if sparse:\n features = scipy.sparse.coo_matrix(features)\n\n return [\n Features(\n features, FEATURE_TYPE_SENTENCE, attribute, self.__class__.__name__\n )\n ]\n\n @staticmethod\n def _to_sparse_sentence_features(\n sparse_sequence_features: List[Features],\n ) -> List[Features]:\n return [\n Features(\n scipy.sparse.coo_matrix(feature.features.sum(0)),\n FEATURE_TYPE_SENTENCE,\n feature.attribute,\n feature.origin,\n )\n for feature in sparse_sequence_features\n ]\n\n @staticmethod\n def _get_name_attribute(attributes: Set[Text]) -> Optional[Text]:\n # there is always either INTENT or ACTION_NAME\n return next(\n (\n attribute\n for attribute in attributes\n if attribute in {INTENT, ACTION_NAME}\n ),\n None,\n )\n\n def _extract_state_features(\n self,\n sub_state: SubState,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n sparse: bool = False,\n ) -> Dict[Text, List[Features]]:\n\n # Remove entities from possible attributes\n attributes = set(\n attribute for attribute in sub_state.keys() if attribute != ENTITIES\n )\n\n if precomputations is not None:\n\n # Collect features for all those attributes\n attributes_to_features = precomputations.collect_features(\n sub_state, attributes=attributes\n )\n # if features for INTENT or ACTION_NAME exist,\n # they are always sparse sequence features;\n # transform them to sentence sparse features\n if attributes_to_features.get(INTENT):\n attributes_to_features[INTENT] = self._to_sparse_sentence_features(\n attributes_to_features[INTENT]\n )\n if attributes_to_features.get(ACTION_NAME):\n attributes_to_features[ACTION_NAME] = self._to_sparse_sentence_features(\n attributes_to_features[ACTION_NAME]\n )\n\n # Combine and sort the features:\n # Per attribute, combine features of same type and level into one Feature,\n # and (if there are any such features) store the results in a list where\n # - all the sparse features are listed first and a\n # - sequence feature is always listed before the sentence feature of the\n # same type (sparse/not sparse).\n output = {\n attribute: Features.reduce(\n features_list=features_list, expected_origins=None\n )\n for attribute, features_list in attributes_to_features.items()\n if len(features_list) > 0 # otherwise, following will fail\n }\n else:\n output = {}\n\n # Check that the name attribute has features\n name_attribute = self._get_name_attribute(attributes)\n if name_attribute and name_attribute not in output:\n # nlu pipeline didn't create features for user or action\n # this might happen, for example, when we have action_name in the state\n # but it did not get featurized because only character level\n # CountVectorsFeaturizer was included in the config.\n output[name_attribute] = self._create_features(\n sub_state, name_attribute, sparse\n )\n return output\n\n def encode_state(\n self,\n state: State,\n precomputations: 
Optional[MessageContainerForCoreFeaturization],\n ) -> Dict[Text, List[Features]]:\n \"\"\"Encode the given state.\n\n Args:\n state: The state to encode\n precomputations: Contains precomputed features and attributes.\n\n Returns:\n A dictionary of state_type to list of features.\n \"\"\"\n state_features = {}\n for state_type, sub_state in state.items():\n if state_type == PREVIOUS_ACTION:\n state_features.update(\n self._extract_state_features(\n sub_state, precomputations=precomputations, sparse=True,\n )\n )\n # featurize user only if it is \"real\" user input,\n # i.e. input from a turn after action_listen\n if state_type == USER and is_prev_action_listen_in_state(state):\n\n state_features.update(\n self._extract_state_features(\n sub_state, precomputations=precomputations, sparse=True,\n )\n )\n if sub_state.get(ENTITIES):\n state_features[ENTITIES] = self._create_features(\n sub_state, ENTITIES, sparse=True\n )\n\n if state_type in {SLOTS, ACTIVE_LOOP}:\n state_features[state_type] = self._create_features(\n sub_state, state_type, sparse=True\n )\n\n return state_features\n\n def encode_entities(\n self,\n entity_data: Dict[Text, Any],\n precomputations: Optional[MessageContainerForCoreFeaturization],\n bilou_tagging: bool = False,\n ) -> Dict[Text, List[Features]]:\n \"\"\"Encode the given entity data.\n\n Produce numeric entity tags for tokens.\n\n Args:\n entity_data: The dict containing the text and entity labels and locations\n precomputations: Contains precomputed features and attributes.\n bilou_tagging: indicates whether BILOU tagging should be used or not\n\n Returns:\n A dictionary of entity type to list of features.\n \"\"\"\n # TODO\n # The entity states used to create the tag-idx-mapping contains the\n # entities and the concatenated entity and roles/groups. 
We do not\n # distinguish between entities and roles/groups right now.\n if (\n not entity_data\n or not self.entity_tag_specs\n or self.entity_tag_specs[0].num_tags < 2\n ):\n # we cannot build a classifier with fewer than 2 classes\n return {}\n\n message = precomputations.lookup_message(user_text=entity_data[TEXT])\n message.data[ENTITIES] = entity_data[ENTITIES]\n\n if not message:\n return {}\n\n if bilou_tagging:\n bilou_utils.apply_bilou_schema_to_message(message)\n\n return {\n ENTITY_TAGS: [\n model_data_utils.get_tag_ids(\n message, self.entity_tag_specs[0], bilou_tagging\n )\n ]\n }\n\n def _encode_action(\n self,\n action: Text,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n ) -> Dict[Text, List[Features]]:\n if action in self.action_texts:\n action_as_sub_state = {ACTION_TEXT: action}\n else:\n action_as_sub_state = {ACTION_NAME: action}\n\n return self._extract_state_features(\n action_as_sub_state, precomputations=precomputations\n )\n\n def encode_all_labels(\n self,\n domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n ) -> List[Dict[Text, List[Features]]]:\n \"\"\"Encode all action from the domain.\n\n Args:\n domain: The domain that contains the actions.\n precomputations: Contains precomputed features and attributes.\n\n Returns:\n A list of encoded actions.\n \"\"\"\n return [\n self._encode_action(action, precomputations)\n for action in domain.action_names_or_texts\n ]\n\n\nclass IntentTokenizerSingleStateFeaturizer(SingleStateFeaturizer):\n \"\"\"A SingleStateFeaturizer for use with policies that predict intent labels.\"\"\"\n\n def _encode_intent(\n self,\n intent: Text,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n ) -> Dict[Text, List[Features]]:\n \"\"\"Extracts a numeric representation of an intent.\n\n Args:\n intent: Intent to be encoded.\n precomputations: Contains precomputed features and attributes.\n\n Returns:\n Encoded representation of intent.\n \"\"\"\n intent_as_sub_state = {INTENT: intent}\n return self._extract_state_features(intent_as_sub_state, precomputations)\n\n def encode_all_labels(\n self,\n domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n ) -> List[Dict[Text, List[Features]]]:\n \"\"\"Encodes all relevant labels from the domain using the given precomputations.\n\n Args:\n domain: The domain that contains the labels.\n precomputations: Contains precomputed features and attributes.\n\n Returns:\n A list of encoded labels.\n \"\"\"\n return [\n self._encode_intent(intent, precomputations) for intent in domain.intents\n ]\n"
] | [
[
"numpy.reshape",
"numpy.array",
"numpy.zeros"
],
[
"numpy.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
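The encode_entities method in the featurizer above can apply BILOU tagging before extracting entity tag ids. As a rough, self-contained illustration of that scheme (this sketch and its bilou_tags helper are hypothetical, not Rasa's bilou_utils API): B-, I- and L- mark the beginning, inside and last token of a multi-token entity, U- marks a single-token entity, and O marks tokens outside any entity.

from typing import List, Tuple

def bilou_tags(num_tokens: int, spans: List[Tuple[int, int, str]]) -> List[str]:
    """spans are (start_token, end_token_exclusive, entity_type) triples."""
    tags = ["O"] * num_tokens
    for start, end, etype in spans:
        if end - start == 1:
            tags[start] = f"U-{etype}"          # single-token entity
        else:
            tags[start] = f"B-{etype}"          # beginning of the span
            for i in range(start + 1, end - 1):
                tags[i] = f"I-{etype}"          # inside the span
            tags[end - 1] = f"L-{etype}"        # last token of the span
    return tags

# "book a flight to new york" with entity "new york" covering tokens 4-5:
print(bilou_tags(6, [(4, 6, "city")]))
# ['O', 'O', 'O', 'O', 'B-city', 'L-city']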
slohani-ai/data-centric-in-qis | [
"bbc545454f7d98a28a4fc83f2f6b14de253fcb6c"
] | [
"Toy-model/CP_werner_with_MA/utils/Purity_Measure.py"
] | [
"import tensorflow as tf\n\ndef purity(dm):\n mul = tf.math.real(tf.linalg.trace(tf.linalg.matmul(dm, dm, adjoint_b=True)))\n return mul.numpy()\n"
] | [
[
"tensorflow.linalg.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
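The purity function above implements Tr(rho rho^dagger), which equals 1 for a pure state and 1/d for the maximally mixed state in dimension d. A minimal usage sketch, assuming TensorFlow 2.x eager execution (the .numpy() call requires it); the example density matrices are illustrative:

import tensorflow as tf

def purity(dm):
    mul = tf.math.real(tf.linalg.trace(tf.linalg.matmul(dm, dm, adjoint_b=True)))
    return mul.numpy()

pure = tf.constant([[1.0, 0.0], [0.0, 0.0]])   # |0><0| as a real density matrix
mixed = tf.constant([[0.5, 0.0], [0.0, 0.5]])  # maximally mixed qubit, I/2

print(purity(pure))   # 1.0
print(purity(mixed))  # 0.5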
qmkakaxi/DIG_FL | [
"9a03ae17b0210166409baf00c2aa87e4809fff1c"
] | [
"standaloneBeta/DIGFL_hfl/models/noisylabel.py"
] | [
"import numpy as np\r\nimport torch\r\n\r\n\r\n\r\n\r\n\r\ndef noisy_label_change_client(dataName, dict_users, dataset, noisy_client, noisy_rate):\r\n \"\"\"\r\n change correct label into noisy label\r\n dataName:'MNIST' or 'cifar'\r\n \"\"\"\r\n if dataName == 'MNIST':\r\n originTargets = dataset.train_labels.numpy()\r\n else:\r\n originTargets = dataset.targets\r\n allorigin_targets = set(originTargets)\r\n\r\n if noisy_client > len(dict_users):\r\n print('too many noisy client')\r\n raise NameError('noisy_client')\r\n exit()\r\n noisyDataList = []\r\n for userIndex in range(noisy_client):\r\n noisyDataList.extend(list(\r\n np.random.choice(list(dict_users[userIndex]), int(len(dict_users[userIndex]) * noisy_rate), replace=False)))\r\n\r\n for index in noisyDataList:\r\n all_targets = allorigin_targets\r\n all_targets = all_targets - set([originTargets[index]])\r\n new_label = np.random.choice(list(all_targets), 1, replace=False)\r\n originTargets[index] = new_label[0]\r\n dataset.targets = torch.tensor(originTargets)\r\n return dataset, noisyDataList,torch.tensor(originTargets)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
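noisy_label_change_client above flips a fraction of each noisy client's labels to a different class chosen uniformly at random. A self-contained sketch of that core flipping step on synthetic labels (flip_labels and its arguments are hypothetical names, not part of the repository):

import numpy as np

def flip_labels(labels, indices, n_classes, rng):
    labels = labels.copy()
    for i in indices:
        # the replacement label is drawn from every class except the true one
        choices = [c for c in range(n_classes) if c != labels[i]]
        labels[i] = rng.choice(choices)
    return labels

rng = np.random.default_rng(0)
labels = np.array([0, 1, 2, 1, 0, 2])
noisy_idx = rng.choice(len(labels), size=3, replace=False)  # 50% noise rate
print(flip_labels(labels, noisy_idx, n_classes=3, rng=rng))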
fairseq-FT/fairseq | [
"18725499144c1bba7c151b796ba774e59d36eaa9",
"18725499144c1bba7c151b796ba774e59d36eaa9",
"18725499144c1bba7c151b796ba774e59d36eaa9",
"18725499144c1bba7c151b796ba774e59d36eaa9",
"18725499144c1bba7c151b796ba774e59d36eaa9",
"18725499144c1bba7c151b796ba774e59d36eaa9"
] | [
"examples/simultaneous_translation/utils/functions.py",
"fairseq/models/wav2vec/wav2vec2_asr.py",
"fairseq/modules/quantization/pq/utils.py",
"fairseq/benchmark/dummy_model.py",
"examples/speech_to_text/data_utils.py",
"tests/test_constraints.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\n\ndef exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):\n \"\"\"\n Implementing exclusive cumprod.\n There is cumprod in pytorch, however there is no exclusive mode.\n cumprod(x) = [x1, x1x2, x2x3x4, ..., prod_{i=1}^n x_i]\n exclusive means cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]\n \"\"\"\n tensor_size = list(tensor.size())\n tensor_size[dim] = 1\n return_tensor = safe_cumprod(\n torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),\n dim=dim,\n eps=eps,\n )\n\n if dim == 0:\n return return_tensor[:-1]\n elif dim == 1:\n return return_tensor[:, :-1]\n elif dim == 2:\n return return_tensor[:, :, :-1]\n else:\n raise RuntimeError(\"Cumprod on dimension 3 and more is not implemented\")\n\n\ndef safe_cumprod(tensor, dim: int, eps: float = 1e-10):\n \"\"\"\n An implementation of cumprod to prevent precision issue.\n cumprod(x)\n = [x1, x1x2, x1x2x3, ....]\n = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]\n = exp(cumsum(log(x)))\n \"\"\"\n\n if (tensor + eps < 0).any().item():\n raise RuntimeError(\n \"Safe cumprod can only take non-negative tensors as input.\"\n \"Consider use torch.cumprod if you want to calculate negative values.\"\n )\n\n log_tensor = torch.log(tensor + eps)\n cumsum_log_tensor = torch.cumsum(log_tensor, dim)\n exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)\n return exp_cumsum_log_tensor\n\n\ndef lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):\n \"\"\"\n Convert a tensor of lengths to mask\n For example, lengths = [[2, 3, 4]], max_len = 5\n mask =\n [[1, 1, 1],\n [1, 1, 1],\n [0, 1, 1],\n [0, 0, 1],\n [0, 0, 0]]\n \"\"\"\n assert len(lengths.size()) <= 2\n if len(lengths) == 2:\n if dim == 1:\n lengths = lengths.t()\n lengths = lengths\n else:\n lengths = lengths.unsqueeze(1)\n\n # lengths : batch_size, 1\n lengths = lengths.view(-1, 1)\n\n batch_size = lengths.size(0)\n # batch_size, max_len\n mask = torch.arange(max_len).expand(batch_size, max_len).type_as(lengths) < lengths\n\n if negative_mask:\n mask = ~mask\n\n if dim == 0:\n # max_len, batch_size\n mask = mask.t()\n\n return mask\n\n\ndef moving_sum(x, start_idx: int, end_idx: int):\n \"\"\"\n From MONOTONIC CHUNKWISE ATTENTION\n https://arxiv.org/pdf/1712.05382.pdf\n Equation (18)\n\n x = [x_1, x_2, ..., x_N]\n MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n−(start_idx−1)}^{n+end_idx-1} x_m\n for n in {1, 2, 3, ..., N}\n\n x : src_len, batch_size\n start_idx : start idx\n end_idx : end idx\n\n Example\n src_len = 5\n batch_size = 3\n x =\n [[ 0, 5, 10],\n [ 1, 6, 11],\n [ 2, 7, 12],\n [ 3, 8, 13],\n [ 4, 9, 14]]\n\n MovingSum(x, 3, 1) =\n [[ 0, 5, 10],\n [ 1, 11, 21],\n [ 3, 18, 33],\n [ 6, 21, 36],\n [ 9, 24, 39]]\n\n MovingSum(x, 1, 3) =\n [[ 3, 18, 33],\n [ 6, 21, 36],\n [ 9, 24, 39],\n [ 7, 17, 27],\n [ 4, 9, 14]]\n \"\"\"\n assert start_idx > 0 and end_idx > 0\n assert len(x.size()) == 2\n src_len, batch_size = x.size()\n # batch_size, 1, src_len\n x = x.t().unsqueeze(1)\n # batch_size, 1, src_len\n moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])\n\n moving_sum = (\n torch.nn.functional.conv1d(\n x, moving_sum_weight, padding=start_idx + end_idx - 1\n )\n .squeeze(1)\n .t()\n )\n moving_sum = moving_sum[end_idx:-start_idx]\n\n assert src_len == moving_sum.size(0)\n assert batch_size 
== moving_sum.size(1)\n\n return moving_sum\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom argparse import Namespace\nimport contextlib\nimport copy\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dataclasses import dataclass, field\nfrom omegaconf import MISSING, II, open_dict\nfrom typing import Any\n\nfrom fairseq import checkpoint_utils, tasks, utils\nfrom fairseq.dataclass import FairseqDataclass\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.tasks import FairseqTask\nfrom fairseq.models import (\n BaseFairseqModel,\n FairseqEncoder,\n FairseqEncoderDecoderModel,\n FairseqIncrementalDecoder,\n register_model,\n)\nfrom fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES\nfrom fairseq.modules import LayerNorm, PositionalEmbedding, TransformerDecoderLayer\n\n\n@dataclass\nclass Wav2Vec2AsrConfig(FairseqDataclass):\n w2v_path: str = field(\n default=MISSING, metadata={\"help\": \"path to wav2vec 2.0 model\"}\n )\n no_pretrained_weights: bool = field(\n default=False, metadata={\"help\": \"if true, does not load pretrained weights\"}\n )\n dropout_input: float = field(\n default=0.0,\n metadata={\"help\": \"dropout to apply to the input (after feat extr)\"},\n )\n final_dropout: float = field(\n default=0.0,\n metadata={\"help\": \"dropout after transformer and before final projection\"},\n )\n dropout: float = field(\n default=0.0, metadata={\"help\": \"dropout probability inside wav2vec 2.0 model\"}\n )\n attention_dropout: float = field(\n default=0.0,\n metadata={\n \"help\": \"dropout probability for attention weights inside wav2vec 2.0 model\"\n },\n )\n activation_dropout: float = field(\n default=0.0,\n metadata={\n \"help\": \"dropout probability after activation in FFN inside wav2vec 2.0 model\"\n },\n )\n\n # masking\n apply_mask: bool = field(\n default=False, metadata={\"help\": \"apply masking during fine-tuning\"}\n )\n mask_length: int = field(\n default=10, metadata={\"help\": \"repeat the mask indices multiple times\"}\n )\n mask_prob: float = field(\n default=0.5,\n metadata={\n \"help\": \"probability of replacing a token with mask (normalized by length)\"\n },\n )\n mask_selection: MASKING_DISTRIBUTION_CHOICES = field(\n default=\"static\", metadata={\"help\": \"how to choose masks\"}\n )\n mask_other: float = field(\n default=0,\n metadata={\n \"help\": \"secondary mask argument (used for more complex distributions), \"\n \"see help in compute_mask_indices\"\n },\n )\n no_mask_overlap: bool = field(\n default=False, metadata={\"help\": \"whether to allow masks to overlap\"}\n )\n\n # channel masking\n mask_channel_length: int = field(\n default=10, metadata={\"help\": \"length of the mask for features (channels)\"}\n )\n mask_channel_prob: float = field(\n default=0.0, metadata={\"help\": \"probability of replacing a feature with 0\"}\n )\n mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(\n default=\"static\",\n metadata={\"help\": \"how to choose mask length for channel masking\"},\n )\n mask_channel_other: float = field(\n default=0,\n metadata={\n \"help\": \"secondary mask argument (used for more complex distributions), \"\n \"see help in compute_mask_indicesh\"\n },\n )\n no_mask_channel_overlap: bool = field(\n default=False, metadata={\"help\": \"whether to allow channel masks to overlap\"}\n )\n freeze_finetune_updates: int = field(\n 
default=0, metadata={\"help\": \"dont finetune wav2vec for this many updates\"}\n )\n feature_grad_mult: float = field(\n default=0.0, metadata={\"help\": \"reset feature grad mult in wav2vec 2.0 to this\"}\n )\n layerdrop: float = field(\n default=0.0, metadata={\"help\": \"probability of dropping a layer in wav2vec 2.0\"}\n )\n normalize: bool = II(\"task.normalize\")\n data: str = II(\"task.data\")\n # this holds the loaded wav2vec args\n w2v_args: Any = None\n\n\n@dataclass\nclass Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):\n pass\n\n\n@register_model(\"wav2vec_ctc\", dataclass=Wav2Vec2CtcConfig)\nclass Wav2VecCtc(BaseFairseqModel):\n def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):\n super().__init__()\n self.cfg = cfg\n self.w2v_encoder = w2v_encoder\n\n def upgrade_state_dict_named(self, state_dict, name):\n super().upgrade_state_dict_named(state_dict, name)\n return state_dict\n\n @classmethod\n def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):\n \"\"\"Build a new model instance.\"\"\"\n w2v_encoder = Wav2VecEncoder(cfg, task.target_dictionary)\n return cls(cfg, w2v_encoder)\n\n def get_normalized_probs(self, net_output, log_probs):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n\n logits = net_output[\"encoder_out\"]\n if log_probs:\n return utils.log_softmax(logits.float(), dim=-1)\n else:\n return utils.softmax(logits.float(), dim=-1)\n\n def forward(self, **kwargs):\n x = self.w2v_encoder(**kwargs)\n return x\n\n\n@dataclass\nclass Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):\n decoder_embed_dim: int = field(\n default=768, metadata={\"help\": \"decoder embedding dimension\"}\n )\n decoder_ffn_embed_dim: int = field(\n default=3072, metadata={\"help\": \"decoder embedding dimension for FFN\"}\n )\n decoder_layers: int = field(default=6, metadata={\"help\": \"num of decoder layers\"})\n decoder_layerdrop: float = field(\n default=0.0, metadata={\"help\": \"decoder layerdrop chance\"}\n )\n decoder_attention_heads: int = field(\n default=4, metadata={\"help\": \"num decoder attention heads\"}\n )\n decoder_learned_pos: bool = field(\n default=False,\n metadata={\"help\": \"use learned positional embeddings in the decoder\"},\n )\n decoder_normalize_before: bool = field(\n default=False, metadata={\"help\": \"apply layernorm before each decoder block\"}\n )\n no_token_positional_embeddings: bool = field(\n default=False,\n metadata={\n \"help\": \"if set, disables positional embeddings (outside self attention)\"\n },\n )\n decoder_dropout: float = field(\n default=0.0, metadata={\"help\": \"dropout probability in the decoder\"}\n )\n decoder_attention_dropout: float = field(\n default=0.0,\n metadata={\n \"help\": \"dropout probability for attention weights inside the decoder\"\n },\n )\n decoder_activation_dropout: float = field(\n default=0.0,\n metadata={\n \"help\": \"dropout probability after activation in FFN inside the decoder\"\n },\n )\n max_target_positions: int = field(\n default=2048, metadata={\"help\": \"max target positions\"}\n )\n share_decoder_input_output_embed: bool = field(\n default=False, metadata={\"help\": \"share decoder input and output embeddings\"}\n )\n\n\n@register_model(\"wav2vec_seq2seq\", dataclass=Wav2Vec2Seq2SeqConfig)\nclass Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n\n @classmethod\n def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):\n \"\"\"Build a new model instance.\"\"\"\n\n 
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n def build_embedding(dictionary, embed_dim):\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n emb = Embedding(num_embeddings, embed_dim, padding_idx)\n return emb\n\n decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)\n\n encoder = cls.build_encoder(cfg)\n decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)\n\n return Wav2Vec2Seq2SeqModel(encoder, decoder)\n\n @classmethod\n def build_encoder(cls, cfg: Wav2Vec2AsrConfig):\n return Wav2VecEncoder(cfg)\n\n @classmethod\n def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):\n return TransformerDecoder(cfg, tgt_dict, embed_tokens)\n\n def forward(self, **kwargs):\n encoder_out = self.encoder(tbc=False, **kwargs)\n decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)\n return decoder_out\n\n def upgrade_state_dict_named(self, state_dict, name):\n super().upgrade_state_dict_named(state_dict, name)\n return state_dict\n\n\nclass Wav2VecEncoder(FairseqEncoder):\n def __init__(self, cfg: Wav2Vec2AsrConfig, tgt_dict=None):\n self.apply_mask = cfg.apply_mask\n\n arg_overrides = {\n \"dropout\": cfg.dropout,\n \"activation_dropout\": cfg.activation_dropout,\n \"dropout_input\": cfg.dropout_input,\n \"attention_dropout\": cfg.attention_dropout,\n \"mask_length\": cfg.mask_length,\n \"mask_prob\": cfg.mask_prob,\n \"mask_selection\": cfg.mask_selection,\n \"mask_other\": cfg.mask_other,\n \"no_mask_overlap\": cfg.no_mask_overlap,\n \"mask_channel_length\": cfg.mask_channel_length,\n \"mask_channel_prob\": cfg.mask_channel_prob,\n \"mask_channel_selection\": cfg.mask_channel_selection,\n \"mask_channel_other\": cfg.mask_channel_other,\n \"no_mask_channel_overlap\": cfg.no_mask_channel_overlap,\n \"encoder_layerdrop\": cfg.layerdrop,\n \"feature_grad_mult\": cfg.feature_grad_mult,\n }\n\n if cfg.w2v_args is None:\n state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)\n w2v_args = state.get(\"cfg\", None)\n if w2v_args is None:\n w2v_args = convert_namespace_to_omegaconf(state[\"args\"])\n cfg.w2v_args = w2v_args\n else:\n state = None\n w2v_args = cfg.w2v_args\n if isinstance(w2v_args, Namespace):\n cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)\n\n assert cfg.normalize == w2v_args.task.normalize, (\n \"Fine-tuning works best when data normalization is the same. 
\"\n \"Please check that --normalize is set or unset for both pre-training and here\"\n )\n\n w2v_args.task.data = cfg.data\n task = tasks.setup_task(w2v_args.task)\n model = task.build_model(w2v_args.model)\n\n if state is not None and not cfg.no_pretrained_weights:\n model.load_state_dict(state[\"model\"], strict=True)\n\n model.remove_pretraining_modules()\n\n super().__init__(task.source_dictionary)\n\n d = w2v_args.model.encoder_embed_dim\n\n self.w2v_model = model\n\n self.final_dropout = nn.Dropout(cfg.final_dropout)\n self.freeze_finetune_updates = cfg.freeze_finetune_updates\n self.num_updates = 0\n\n if tgt_dict is not None:\n self.proj = Linear(d, len(tgt_dict))\n elif getattr(cfg, \"decoder_embed_dim\", d) != d:\n self.proj = Linear(d, cfg.decoder_embed_dim)\n else:\n self.proj = None\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n super().set_num_updates(num_updates)\n self.num_updates = num_updates\n\n def forward(self, source, padding_mask, tbc=True, **kwargs):\n\n w2v_args = {\n \"source\": source,\n \"padding_mask\": padding_mask,\n \"mask\": self.apply_mask and self.training,\n }\n\n ft = self.freeze_finetune_updates <= self.num_updates\n\n with torch.no_grad() if not ft else contextlib.ExitStack():\n x, padding_mask = self.w2v_model.extract_features(**w2v_args)\n\n if tbc:\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n x = self.final_dropout(x)\n\n if self.proj:\n x = self.proj(x)\n\n return {\n \"encoder_out\": x, # T x B x C\n \"encoder_padding_mask\": padding_mask, # B x T\n \"padding_mask\": padding_mask,\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n if encoder_out[\"encoder_out\"] is not None:\n encoder_out[\"encoder_out\"] = encoder_out[\"encoder_out\"].index_select(\n 1, new_order\n )\n if encoder_out[\"encoder_padding_mask\"] is not None:\n encoder_out[\"encoder_padding_mask\"] = encoder_out[\n \"encoder_padding_mask\"\n ].index_select(0, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return None\n\n def upgrade_state_dict_named(self, state_dict, name):\n return state_dict\n\n\nclass TransformerDecoder(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self,\n cfg: Wav2Vec2Seq2SeqConfig,\n dictionary,\n embed_tokens,\n no_encoder_attn=False,\n ):\n super().__init__(dictionary)\n\n self.dropout = cfg.decoder_dropout\n self.share_input_output_embed = cfg.share_decoder_input_output_embed\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = cfg.decoder_embed_dim\n self.output_embed_dim = cfg.decoder_embed_dim\n\n self.layerdrop = cfg.decoder_layerdrop\n\n padding_idx = embed_tokens.padding_idx\n self.max_target_positions = cfg.max_target_positions\n\n self.embed_tokens = embed_tokens\n self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim\n\n self.project_in_dim = (\n Linear(input_embed_dim, embed_dim, bias=False)\n if embed_dim != input_embed_dim\n else None\n )\n\n self.embed_positions = (\n PositionalEmbedding(\n cfg.max_target_positions,\n embed_dim,\n padding_idx,\n learned=cfg.decoder_learned_pos,\n )\n if not cfg.no_token_positional_embeddings\n else None\n )\n\n # TODO: update this when transformer gets converted to dataclass configs\n transformer_cfg = copy.deepcopy(cfg)\n with open_dict(transformer_cfg):\n transformer_cfg.dropout = transformer_cfg.decoder_dropout\n transformer_cfg.attention_dropout = (\n transformer_cfg.decoder_attention_dropout\n )\n transformer_cfg.activation_dropout = (\n transformer_cfg.decoder_activation_dropout\n )\n\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [\n TransformerDecoderLayer(transformer_cfg, no_encoder_attn)\n for _ in range(transformer_cfg.decoder_layers)\n ]\n )\n\n if not self.share_input_output_embed:\n self.embed_out = nn.Parameter(\n torch.Tensor(len(dictionary), self.output_embed_dim)\n )\n nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)\n\n if transformer_cfg.decoder_normalize_before:\n self.layer_norm = LayerNorm(embed_dim)\n else:\n self.layer_norm = None\n\n def forward(\n self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (Tensor, optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n prev_output_tokens = prev_output_tokens.long()\n x, extra = self.extract_features(\n prev_output_tokens, encoder_out, incremental_state\n )\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(\n self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused\n ):\n \"\"\"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n\n # embed positions\n positions = (\n self.embed_positions(\n prev_output_tokens, incremental_state=incremental_state\n )\n if self.embed_positions is not None\n else None\n )\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if 
positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n x = self.embed_scale * self.embed_tokens(prev_output_tokens)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n x = F.dropout(x, p=self.dropout, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n attn = None\n\n inner_states = [x]\n\n # decoder layers\n for layer in self.layers:\n dropout_probability = np.random.random()\n if not self.training or (dropout_probability > self.layerdrop):\n x, attn, _ = layer(\n x,\n encoder_out[\"encoder_out\"] if encoder_out is not None else None,\n encoder_out[\"encoder_padding_mask\"]\n if encoder_out is not None\n else None,\n incremental_state,\n self_attn_mask=self.buffered_future_mask(x)\n if incremental_state is None\n else None,\n )\n inner_states.append(x)\n\n if self.layer_norm:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n return x, {\"attn\": attn, \"inner_states\": inner_states}\n\n def output_layer(self, features, **kwargs):\n \"\"\"Project features to the vocabulary size.\"\"\"\n # project back to size of vocabulary\n if self.share_input_output_embed:\n return F.linear(features, self.embed_tokens.weight)\n else:\n return F.linear(features, self.embed_out)\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n if (\n not hasattr(self, \"_future_mask\")\n or self._future_mask is None\n or self._future_mask.device != tensor.device\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(tensor.new(dim, dim)), 1\n )\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n return state_dict\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.0)\n return m\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport re\nfrom operator import attrgetter, itemgetter\n\nimport numpy as np\nimport torch.distributed as dist\nimport torch.nn as nn\n\nfrom .modules import PQConv2d, PQEmbedding, PQLinear\nfrom .pq import PQ\n\n\ndef quantize_model_(\n model,\n size_tracker,\n layers_to_quantize,\n block_sizes_config,\n n_centroids_config,\n step=0,\n n_iter=15,\n eps=1e-6,\n max_tentatives=100,\n verbose=True,\n):\n \"\"\"\n Quantize a model in-place by stages. All the targeted\n layers are replaced by their quantized counterpart,\n and the model is ready for the finetuning of the\n centroids in a standard training loop (no modifications\n required). Note that we do not quantize biases.\n\n Args:\n - model: a nn.Module\n - size_tracker: useful for tracking quatization statistics\n - layers_to_quantize: a list containing regexps for\n filtering the layers to quantize at each stage according\n to their name (as in model.named_parameters())\n - block_sizes_config: dict like\n {\n 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),\n 'Linear': ('in_features', {'*': 8})\n }\n For instance, all conv2d layers with kernel size 3x3 have\n a block size of 9 and all Linear layers are quantized with\n a block size of 8, irrespective of their size.\n - n_centroids_config: dict like\n {\n 'Conv2d': ('kernel_size', {'*': 256}),\n 'Linear': ('in_features', {'*': 256})\n }\n For instance, all conv2d layers are quantized with 256 centroids\n - step: the layers to quantize inplace corresponding\n to layers_to_quantize[step]\n \"\"\"\n\n quantized_layers = get_layers(model, layers_to_quantize[step])\n\n for layer in quantized_layers:\n\n # book-keeping\n is_master_process = (not dist.is_initialized()) or (\n dist.is_initialized() and dist.get_rank() == 0\n )\n verbose = verbose and is_master_process\n\n # get block size and centroids\n module = attrgetter(layer)(model)\n block_size = get_param(module, layer, block_sizes_config)\n n_centroids = get_param(module, layer, n_centroids_config)\n if verbose:\n logging.info(\n f\"Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids\"\n )\n\n # quantize layer\n weight = module.weight.data.clone()\n is_bias = \"bias\" in [x[0] for x in module.named_parameters()]\n bias = module.bias.data.clone() if is_bias else None\n quantizer = PQ(\n weight,\n block_size,\n n_centroids=n_centroids,\n n_iter=n_iter,\n eps=eps,\n max_tentatives=max_tentatives,\n verbose=verbose,\n )\n\n # quantization performed on all GPUs with same seed\n quantizer.encode()\n centroids = quantizer.centroids.contiguous()\n assignments = quantizer.assignments.contiguous()\n\n # broadcast results to make sure weights are up-to-date\n if dist.is_initialized():\n dist.broadcast(centroids, 0)\n dist.broadcast(assignments, 0)\n\n # instantiate the quantized counterpart\n if isinstance(module, nn.Linear):\n out_features, in_features = map(\n lambda k: module.__dict__[k], [\"out_features\", \"in_features\"]\n )\n quantized_module = PQLinear(\n centroids, assignments, bias, in_features, out_features\n )\n elif isinstance(module, nn.Embedding):\n num_embeddings, embedding_dim = map(\n lambda k: module.__dict__[k], [\"num_embeddings\", \"embedding_dim\"]\n )\n quantized_module = PQEmbedding(\n centroids, assignments, num_embeddings, embedding_dim\n )\n elif isinstance(module, nn.Conv2d):\n 
out_channels, in_channels, kernel_size = map(\n lambda k: module.__dict__[k],\n [\"out_channels\", \"in_channels\", \"kernel_size\"],\n )\n stride, padding, dilation, groups, padding_mode = map(\n lambda k: module.__dict__[k],\n [\"stride\", \"padding\", \"dilation\", \"groups\", \"padding_mode\"],\n )\n\n quantized_module = PQConv2d(\n centroids,\n assignments,\n bias,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n padding_mode=padding_mode,\n )\n else:\n raise ValueError(f\"Module {module} not yet supported for quantization\")\n\n # replace layer by its quantized counterpart\n attrsetter(layer)(model, quantized_module)\n\n # update statistics\n size_tracker.update(weight, block_size, n_centroids)\n\n # return name of quantized layers\n return quantized_layers\n\n\ndef get_layers(model, filter_regexp):\n \"\"\"\n Filters out the layers according to a regexp. Note that\n we omit biases.\n\n Args:\n - model: a nn.Module\n - filter_regexp: a regexp to filter the layers to keep\n according to their name in model.named_parameters().\n For instance, the regexp:\n\n down_layers\\\\.[123456]\\\\.(conv[12]|identity\\\\.conv))\n\n is keeping blocks down_layers from 1 to 6, and inside\n each block is keeping conv1, conv2 and identity.conv.\n\n Remarks:\n - We add (module\\\\.)? at the beginning of the regexp to\n account for the possible use of nn.parallel.DataParallel\n \"\"\"\n\n # get all parameter names\n all_layers = map(itemgetter(0), model.named_parameters())\n\n # remove biases\n all_layers = filter(lambda x: \"bias\" not in x, all_layers)\n\n # remove .weight in all other names (or .weight_orig is spectral norm)\n all_layers = map(lambda x: x.replace(\".weight_orig\", \"\"), all_layers)\n all_layers = map(lambda x: x.replace(\".weight\", \"\"), all_layers)\n\n # return filtered layers\n filter_regexp = \"(module\\\\.)?\" + \"(\" + filter_regexp + \")\"\n r = re.compile(filter_regexp)\n\n return list(filter(r.match, all_layers))\n\n\ndef get_param(module, layer_name, param_config):\n \"\"\"\n Given a quantization configuration, get the right parameter\n for the module to be quantized.\n\n Args:\n - module: a nn.Module\n - layer_name: the name of the layer\n - param_config: a dict like\n {\n 'Conv2d': ('kernel_size', {'(3, 3)': 9, '(1, 1)': 4}),\n 'Linear': ('in_features', {'*': 8})\n }\n For instance, all conv2d layers with kernel size 3x3 have\n a block size of 9 and all Linear layers are quantized with\n a block size of 8, irrespective of their size.\n\n Remarks:\n - if 'fuzzy_name' is passed as a parameter, layers whose layer_name\n include 'fuzzy_name' will be assigned the given parameter.\n In the following example, conv.expand layers will have a block\n size of 9 while conv.reduce will have a block size of 4 and all\n other layers will have a block size of 2.\n {\n 'Conv2d': ('fuzzy_name', {'expand': 9, 'reduce': 4, '*': 2}),\n 'Linear': ('fuzzy_name', {'classifier': 8, 'projection': 4})\n }\n\n \"\"\"\n\n layer_type = module.__class__.__name__\n\n if layer_type not in param_config:\n raise KeyError(f\"Layer type {layer_type} not in config for layer {module}\")\n\n feature, params = param_config[module.__class__.__name__]\n\n if feature != \"fuzzy_name\":\n feature_value = str(getattr(module, feature))\n if feature_value not in params:\n if \"*\" in params:\n feature_value = \"*\"\n else:\n raise KeyError(\n f\"{feature}={feature_value} not in config for layer {module}\"\n )\n else:\n feature_values = [name for 
name in params if name in layer_name]\n if len(feature_values) == 0:\n if \"*\" in params:\n feature_value = \"*\"\n else:\n raise KeyError(f\"name={layer_name} not in config for {module}\")\n else:\n feature_value = feature_values[0]\n\n return params[feature_value]\n\n\nclass SizeTracker(object):\n \"\"\"\n Class to keep track of the compressed network size with iPQ.\n\n Args:\n - model: a nn.Module\n\n Remarks:\n - The compressed size is the sum of three components\n for each layer in the network:\n (1) Storing the centroids given by iPQ in fp16\n (2) Storing the assignments of the blocks in int8\n (3) Storing all non-compressed elements such as biases\n - This cost in only valid if we use 256 centroids (then\n indexing can indeed by done with int8).\n \"\"\"\n\n def __init__(self, model):\n self.model = model\n self.size_non_compressed_model = self.compute_size()\n self.size_non_quantized = self.size_non_compressed_model\n self.size_index = 0\n self.size_centroids = 0\n self.n_quantized_layers = 0\n\n def compute_size(self):\n \"\"\"\n Computes the size of the model (in MB).\n \"\"\"\n\n res = 0\n for _, p in self.model.named_parameters():\n res += p.numel()\n return res * 4 / 1024 / 1024\n\n def update(self, W, block_size, n_centroids):\n \"\"\"\n Updates the running statistics when quantizing a new layer.\n \"\"\"\n\n # bits per weights\n bits_per_weight = np.log2(n_centroids) / block_size\n self.n_quantized_layers += 1\n\n # size of indexing the subvectors of size block_size (in MB)\n size_index_layer = bits_per_weight * W.numel() / 8 / 1024 / 1024\n self.size_index += size_index_layer\n\n # size of the centroids stored in float16 (in MB)\n size_centroids_layer = n_centroids * block_size * 2 / 1024 / 1024\n self.size_centroids += size_centroids_layer\n\n # size of non-compressed layers, e.g. LayerNorms or biases (in MB)\n size_uncompressed_layer = W.numel() * 4 / 1024 / 1024\n self.size_non_quantized -= size_uncompressed_layer\n\n def __repr__(self):\n size_compressed = (\n self.size_index + self.size_centroids + self.size_non_quantized\n )\n compression_ratio = self.size_non_compressed_model / size_compressed # NOQA\n return (\n f\"Non-compressed model size: {self.size_non_compressed_model:.2f} MB. \"\n f\"After quantizing {self.n_quantized_layers} layers, size \"\n f\"(indexing + centroids + other): {self.size_index:.2f} MB + \"\n f\"{self.size_centroids:.2f} MB + {self.size_non_quantized:.2f} MB = \"\n f\"{size_compressed:.2f} MB, compression ratio: {compression_ratio:.2f}x\"\n )\n\n\ndef attrsetter(*items):\n def resolve_attr(obj, attr):\n attrs = attr.split(\".\")\n head = attrs[:-1]\n tail = attrs[-1]\n\n for name in head:\n obj = getattr(obj, name)\n return obj, tail\n\n def g(obj, val):\n for attr in items:\n resolved_obj, resolved_attr = resolve_attr(obj, attr)\n setattr(resolved_obj, resolved_attr, val)\n\n return g\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq.data import Dictionary\nfrom fairseq.models import (\n FairseqDecoder,\n FairseqLanguageModel,\n register_model,\n register_model_architecture,\n)\n\n\n@register_model(\"dummy_model\")\nclass DummyModel(FairseqLanguageModel):\n def __init__(self, args, encoder):\n super().__init__(encoder)\n self.args = args\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--num-layers\", type=int, default=24)\n parser.add_argument(\"--embed-dim\", type=int, default=1024)\n\n @classmethod\n def build_model(cls, args, task):\n encoder = DummyEncoder(\n num_embed=len(task.target_dictionary),\n embed_dim=args.embed_dim,\n num_layers=args.num_layers,\n )\n return cls(args, encoder)\n\n def forward(self, src_tokens, masked_tokens=None, **kwargs):\n return self.decoder(src_tokens, masked_tokens=masked_tokens)\n\n\nclass DummyEncoder(FairseqDecoder):\n def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):\n super().__init__(Dictionary())\n self.embed = nn.Embedding(\n num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0\n )\n self.layers_a = nn.ModuleList(\n [\n nn.Sequential(\n nn.LayerNorm(embed_dim),\n nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection\n nn.Linear(3 * embed_dim, embed_dim), # skip self-attention\n nn.Linear(embed_dim, embed_dim), # output projection\n nn.Dropout(),\n )\n for i in range(num_layers)\n ]\n )\n self.layers_b = nn.ModuleList(\n [\n nn.Sequential(\n nn.LayerNorm(embed_dim),\n nn.Linear(embed_dim, 4 * embed_dim), # FFN\n nn.ReLU(),\n nn.Linear(4 * embed_dim, embed_dim), # FFN\n nn.Dropout(0.1),\n )\n for i in range(num_layers)\n ]\n )\n self.out_proj = nn.Linear(embed_dim, num_embed)\n\n def forward(self, tokens, masked_tokens=None):\n x = self.embed(tokens)\n for layer_a, layer_b in zip(self.layers_a, self.layers_b):\n x = x + layer_a(x)\n x = x + layer_b(x)\n x = self.out_proj(x)\n if masked_tokens is not None:\n x = x[masked_tokens]\n return (x,)\n\n def max_positions(self):\n return 1024\n\n def get_normalized_probs(self, net_output, log_probs, sample=None):\n logits = net_output[0].float()\n if log_probs:\n return F.log_softmax(logits, dim=-1)\n else:\n return F.softmax(logits, dim=-1)\n\n\n@register_model_architecture(\"dummy_model\", \"dummy_model\")\ndef base_architecture(args):\n pass\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport csv\nimport os\nimport os.path as op\nimport zipfile\nfrom functools import reduce\nfrom glob import glob\nfrom multiprocessing import cpu_count\nfrom typing import Any, Dict, List\n\nimport numpy as np\nimport pandas as pd\nimport sentencepiece as sp\nfrom fairseq.data.audio.audio_utils import _get_kaldi_fbank, _get_torchaudio_fbank\nfrom fairseq.data.audio.feature_transforms.utterance_cmvn import UtteranceCMVN\nfrom tqdm import tqdm\n\n\nUNK_TOKEN, UNK_TOKEN_ID = \"<unk>\", 3\nBOS_TOKEN, BOS_TOKEN_ID = \"<s>\", 0\nEOS_TOKEN, EOS_TOKEN_ID = \"</s>\", 2\nPAD_TOKEN, PAD_TOKEN_ID = \"<pad>\", 1\n\n\ndef gen_vocab(\n input_path: str, output_path_prefix: str, model_type=\"bpe\", vocab_size=1000,\n):\n # Train SentencePiece Model\n arguments = [\n f\"--input={input_path}\",\n f\"--model_prefix={output_path_prefix}\",\n f\"--model_type={model_type}\",\n f\"--vocab_size={vocab_size}\",\n \"--character_coverage=1.0\",\n f\"--num_threads={cpu_count()}\",\n f\"--unk_id={UNK_TOKEN_ID}\",\n f\"--bos_id={BOS_TOKEN_ID}\",\n f\"--eos_id={EOS_TOKEN_ID}\",\n f\"--pad_id={PAD_TOKEN_ID}\",\n ]\n sp.SentencePieceTrainer.Train(\" \".join(arguments))\n # Export fairseq dictionary\n spm = sp.SentencePieceProcessor()\n spm.Load(output_path_prefix + \".model\")\n vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}\n assert (\n vocab.get(UNK_TOKEN_ID) == UNK_TOKEN\n and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN\n and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN\n and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN\n )\n vocab = {\n i: s\n for i, s in vocab.items()\n if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}\n }\n with open(output_path_prefix + \".txt\", \"w\") as f_out:\n for _, s in sorted(vocab.items(), key=lambda x: x[0]):\n f_out.write(f\"{s} 1\\n\")\n\n\ndef extract_fbank_features(\n waveform,\n sample_rate,\n output_path=None,\n n_mel_bins=80,\n apply_utterance_cmvn=True,\n overwrite=False,\n):\n if output_path is not None and op.exists(output_path) and not overwrite:\n return\n\n _waveform = waveform * (2 ** 15) # Kaldi compliance: 16-bit signed integers\n _waveform = _waveform.squeeze().numpy()\n\n features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)\n if features is None:\n features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)\n if features is None:\n raise ImportError(\n \"Please install pyKaldi or torchaudio to enable \"\n \"online filterbank feature extraction\"\n )\n\n if apply_utterance_cmvn:\n cmvn = UtteranceCMVN(norm_means=True, norm_vars=True)\n features = cmvn(features)\n if output_path is not None:\n np.save(output_path, features)\n else:\n return features\n\n\ndef create_zip(data_root, zip_path):\n cwd = os.path.abspath(os.curdir)\n os.chdir(data_root)\n with zipfile.ZipFile(zip_path, \"w\", zipfile.ZIP_STORED) as f:\n for filename in tqdm(glob(\"*.npy\")):\n f.write(filename)\n os.chdir(cwd)\n\n\ndef is_npy_data(data: bytes) -> bool:\n return data[0] == 147 and data[1] == 78\n\n\ndef get_zip_manifest(zip_root, zip_filename):\n zip_path = op.join(zip_root, zip_filename)\n with zipfile.ZipFile(zip_path, mode=\"r\") as f:\n info = f.infolist()\n manifest = {}\n for i in tqdm(info):\n utt_id = op.splitext(i.filename)[0]\n offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size\n manifest[utt_id] = f\"{zip_filename}:{offset}:{file_size}\"\n 
with open(zip_path, \"rb\") as f:\n f.seek(offset)\n data = f.read(file_size)\n assert len(data) > 1 and is_npy_data(data)\n return manifest\n\n\ndef gen_config_yaml(\n data_root,\n spm_filename,\n yaml_filename=\"config.yaml\",\n specaugment_policy=\"lb\",\n prepend_tgt_lang_tag=False,\n sampling_alpha=1.0,\n):\n data_root = op.abspath(data_root)\n writer = S2TDataConfigWriter(op.join(data_root, yaml_filename))\n writer.set_audio_root(op.abspath(data_root))\n writer.set_vocab_filename(spm_filename.replace(\".model\", \".txt\"))\n writer.set_input_channels(1)\n writer.set_input_feat_per_channel(80)\n specaugment_setters = {\n \"lb\": writer.set_specaugment_lb_policy,\n \"ld\": writer.set_specaugment_ld_policy,\n \"sm\": writer.set_specaugment_sm_policy,\n \"ss\": writer.set_specaugment_ss_policy,\n }\n assert specaugment_policy in specaugment_setters\n specaugment_setters[specaugment_policy]()\n writer.set_bpe_tokenizer(\n {\n \"bpe\": \"sentencepiece\",\n \"sentencepiece_model\": op.join(data_root, spm_filename),\n }\n )\n if prepend_tgt_lang_tag:\n writer.set_prepend_tgt_lang_tag(True)\n writer.set_sampling_alpha(sampling_alpha)\n writer.set_feature_transforms(\"_train\", [\"specaugment\"])\n writer.flush()\n\n\ndef load_df_from_tsv(path: str):\n return pd.read_csv(\n path,\n sep=\"\\t\",\n header=0,\n encoding=\"utf-8\",\n escapechar=\"\\\\\",\n quoting=csv.QUOTE_NONE,\n na_filter=False,\n )\n\n\ndef save_df_to_tsv(dataframe, path):\n dataframe.to_csv(\n path,\n sep=\"\\t\",\n header=True,\n index=False,\n encoding=\"utf-8\",\n escapechar=\"\\\\\",\n quoting=csv.QUOTE_NONE,\n )\n\n\ndef filter_manifest_df(\n df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000\n):\n filters = {\n \"no speech\": df[\"audio\"] == \"\",\n f\"short speech (<{min_n_frames} frames)\": df[\"n_frames\"] < min_n_frames,\n \"empty sentence\": df[\"tgt_text\"] == \"\",\n }\n if is_train_split:\n filters[f\"long speech (>{max_n_frames} frames)\"] = df[\"n_frames\"] > max_n_frames\n if extra_filters is not None:\n filters.update(extra_filters)\n invalid = reduce(lambda x, y: x | y, filters.values())\n valid = ~invalid\n print(\n \"| \"\n + \", \".join(f\"{n}: {f.sum()}\" for n, f in filters.items())\n + f\", total {invalid.sum()} filtered, {valid.sum()} remained.\"\n )\n return df[valid]\n\n\nclass S2TDataConfigWriter(object):\n DEFAULT_VOCAB_FILENAME = \"dict.txt\"\n DEFAULT_INPUT_FEAT_PER_CHANNEL = 80\n DEFAULT_INPUT_CHANNELS = 1\n\n def __init__(self, yaml_path):\n try:\n import yaml\n except ImportError:\n print(\"Please install PyYAML to load YAML files for S2T data config\")\n self.yaml = yaml\n self.yaml_path = yaml_path\n self.config = {}\n\n def flush(self):\n with open(self.yaml_path, \"w\") as f:\n self.yaml.dump(self.config, f)\n\n def set_audio_root(self, audio_root=\"\"):\n self.config[\"audio_root\"] = audio_root\n\n def set_vocab_filename(self, vocab_filename=\"dict.txt\"):\n self.config[\"vocab_filename\"] = vocab_filename\n\n def set_specaugment(\n self,\n time_wrap_w: int,\n freq_mask_n: int,\n freq_mask_f: int,\n time_mask_n: int,\n time_mask_t: int,\n time_mask_p: float,\n ):\n self.config[\"specaugment\"] = {\n \"time_wrap_W\": time_wrap_w,\n \"freq_mask_N\": freq_mask_n,\n \"freq_mask_F\": freq_mask_f,\n \"time_mask_N\": time_mask_n,\n \"time_mask_T\": time_mask_t,\n \"time_mask_p\": time_mask_p,\n }\n\n def set_specaugment_lb_policy(self):\n self.set_specaugment(\n time_wrap_w=0,\n freq_mask_n=1,\n freq_mask_f=27,\n time_mask_n=1,\n time_mask_t=100,\n 
time_mask_p=1.0,\n )\n\n def set_specaugment_ld_policy(self):\n self.set_specaugment(\n time_wrap_w=0,\n freq_mask_n=2,\n freq_mask_f=27,\n time_mask_n=2,\n time_mask_t=100,\n time_mask_p=1.0,\n )\n\n def set_specaugment_sm_policy(self):\n self.set_specaugment(\n time_wrap_w=0,\n freq_mask_n=2,\n freq_mask_f=15,\n time_mask_n=2,\n time_mask_t=70,\n time_mask_p=0.2,\n )\n\n def set_specaugment_ss_policy(self):\n self.set_specaugment(\n time_wrap_w=0,\n freq_mask_n=2,\n freq_mask_f=27,\n time_mask_n=2,\n time_mask_t=70,\n time_mask_p=0.2,\n )\n\n def set_input_channels(self, input_channels=1):\n self.config[\"input_channels\"] = input_channels\n\n def set_input_feat_per_channel(self, input_feat_per_channel=80):\n self.config[\"input_feat_per_channel\"] = input_feat_per_channel\n\n def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]):\n self.config[\"bpe_tokenizer\"] = bpe_tokenizer\n\n def set_feature_transforms(self, split, transforms: List[str]):\n if \"transforms\" not in self.config:\n self.config[\"transforms\"] = {}\n self.config[\"transforms\"][split] = transforms\n\n def set_prepend_tgt_lang_tag(self, flag=True):\n self.config[\"prepend_tgt_lang_tag\"] = flag\n\n def set_sampling_alpha(self, sampling_alpha=1.0):\n self.config[\"sampling_alpha\"] = sampling_alpha\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport unittest\n\nimport torch\nfrom fairseq.token_generation_constraints import *\n\n\ndef tensorize(constraints: List[List[int]]) -> torch.Tensor:\n return [torch.tensor(x) for x in constraints]\n\n\nclass TestHelperRoutines(unittest.TestCase):\n def setUp(self):\n self.examples = [\n ([[]], torch.tensor([[0]])),\n ([[], []], torch.tensor([[0], [0]])),\n ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])),\n (\n [\n [\n torch.tensor([3, 1, 2]),\n torch.tensor([3]),\n torch.tensor([4, 5, 6, 7]),\n ],\n [],\n [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],\n ],\n torch.tensor(\n [\n [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0],\n ]\n ),\n ),\n ]\n\n def test_packing(self):\n \"\"\"Ensures the list of lists of tensors gets packed correctly.\"\"\"\n for batch_constraints, expected_tensor in self.examples:\n packed = pack_constraints(batch_constraints)\n assert torch.equal(packed, expected_tensor)\n\n\nclass TestUnorderedConstraintState(unittest.TestCase):\n def setUp(self):\n # Tuples of (contraint set, expected printed graph, token counts per node)\n self.examples = [\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n \"([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))\",\n {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1},\n ),\n ([], \"[None].False#0\", {}),\n (tensorize([[0]]), \"([None].False#1 [0].True#1)\", {0: 1}),\n (\n tensorize([[100000, 1, 2, 3, 4, 5]]),\n \"([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))\",\n {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},\n ),\n (\n tensorize([[1, 2], [1, 2]]),\n \"([None].False#2 ([1].False#2 [2].True#2))\",\n {1: 2, 2: 2},\n ),\n (\n tensorize([[1, 2], [3, 4]]),\n \"([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))\",\n {1: 1, 2: 1, 3: 1, 4: 1},\n ),\n ]\n\n self.sequences = [\n (\n self.examples[0][0],\n [],\n {\"bank\": 0, \"num_completed\": 0, \"finished\": False, \"is_root\": True},\n ),\n (\n self.examples[0][0],\n [1, 2],\n {\"bank\": 2, \"num_completed\": 0, \"finished\": False, \"is_root\": False},\n ),\n (\n self.examples[0][0],\n [1, 2, 94],\n {\"bank\": 1, \"num_completed\": 1, \"finished\": False, \"is_root\": True},\n ),\n (\n self.examples[0][0],\n [1, 3, 999, 1, 4],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": False, \"is_root\": False},\n ),\n (\n self.examples[0][0],\n [1, 3, 999, 1, 4, 999],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": False, \"is_root\": True},\n ),\n (\n self.examples[0][0],\n [4, 5, 6, 8],\n {\"bank\": 2, \"num_completed\": 1, \"finished\": False, \"is_root\": True},\n ),\n (\n self.examples[0][0],\n # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5]\n # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],\n [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],\n {\"bank\": 14, \"num_completed\": 6, \"finished\": True, \"is_root\": False},\n ),\n (\n self.examples[0][0],\n [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],\n {\"bank\": 14, \"num_completed\": 6, \"finished\": True, \"is_root\": True},\n ),\n (\n tensorize([[1], [2, 3]]),\n # Should not be able to get credit for entering 1 a second time\n [1, 1],\n 
{\"bank\": 1, \"num_completed\": 1, \"finished\": False, \"is_root\": True},\n ),\n (\n self.examples[4][0],\n [1, 2, 1, 2],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": False},\n ),\n (\n self.examples[4][0],\n [1, 2, 1, 2, 1],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": True},\n ),\n (\n self.examples[5][0],\n [1, 2, 3, 4, 5],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": True},\n ),\n ]\n\n def test_graphs(self):\n \"\"\"\n Test whether unordered graph systems are created correctly.\n \"\"\"\n for example in self.examples:\n constraints, expected, gold_counts = example\n c = ConstraintNode.create(constraints)\n assert (\n ConstraintNode.print_graph(c) == expected\n ), f\"got {ConstraintNode.print_graph(c)}, expected {expected}\"\n assert (\n c.token_counts() == gold_counts\n ), f\"{c} got {c.token_counts()} wanted {gold_counts}\"\n\n def test_next_tokens(self):\n \"\"\"\n Tests that the set of next tokens is correct.\n \"\"\"\n for example in self.examples:\n constraints, expected, gold_counts = example\n root = ConstraintNode.create(constraints)\n\n root_tokens = set(root.children.keys())\n for sequence in constraints:\n state = UnorderedConstraintState(root)\n for token in sequence:\n all_tokens = root_tokens.union(state.node.children.keys())\n assert (\n all_tokens == state.next_tokens()\n ), f\"ALL {all_tokens} NEXT {state.next_tokens()}\"\n state = state.advance(token)\n\n def test_sequences(self):\n for constraints, tokens, expected in self.sequences:\n state = UnorderedConstraintState.create(pack_constraints([constraints])[0])\n for token in tokens:\n state = state.advance(token)\n result = {}\n for attr in expected.keys():\n result[attr] = getattr(state, attr)\n\n assert (\n result == expected\n ), f\"TEST({tokens}) GOT: {result} WANTED: {expected}\"\n\n\nclass TestOrderedConstraintState(unittest.TestCase):\n def setUp(self):\n self.sequences = [\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [],\n {\"bank\": 0, \"num_completed\": 0, \"finished\": False, \"is_root\": True},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2],\n {\"bank\": 2, \"num_completed\": 0, \"finished\": False, \"is_root\": False},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2, 94],\n {\"bank\": 0, \"num_completed\": 0, \"finished\": False, \"is_root\": True},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 3, 999, 1, 4],\n {\"bank\": 0, \"num_completed\": 0, \"finished\": False, \"is_root\": True},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2, 3, 999, 999],\n {\"bank\": 3, \"num_completed\": 1, \"finished\": False, \"is_root\": False},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2, 3, 77, 1, 3, 1],\n {\"bank\": 6, \"num_completed\": 2, \"finished\": False, \"is_root\": False},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],\n {\"bank\": 14, \"num_completed\": 6, \"finished\": True, \"is_root\": False},\n ),\n (\n tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),\n [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],\n {\"bank\": 14, \"num_completed\": 6, \"finished\": True, \"is_root\": False},\n ),\n (\n tensorize([[1], [2, 3]]),\n [1, 1],\n {\"bank\": 1, \"num_completed\": 1, \"finished\": False, 
\"is_root\": False},\n ),\n (\n tensorize([[1, 2], [1, 2]]),\n [1, 2, 1, 2],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": False},\n ),\n (\n tensorize([[1, 2], [1, 2]]),\n [1, 2, 1, 2, 1],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": False},\n ),\n (\n tensorize([[1, 2], [3, 4]]),\n [1, 2, 3, 4, 5],\n {\"bank\": 4, \"num_completed\": 2, \"finished\": True, \"is_root\": False},\n ),\n ]\n\n def test_sequences(self):\n for i, (constraints, tokens, expected) in enumerate(self.sequences):\n state = OrderedConstraintState.create(pack_constraints([constraints])[0])\n for token in tokens:\n state = state.advance(token)\n result = {}\n for attr in expected.keys():\n result[attr] = getattr(state, attr)\n assert (\n result == expected\n ), f\"TEST({tokens}) GOT: {result} WANTED: {expected}\"\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.ones",
"torch.nn.functional.conv1d",
"torch.exp",
"torch.log",
"torch.arange",
"torch.cumsum"
],
[
"torch.nn.Dropout",
"numpy.random.random",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.linear"
],
[
"torch.distributed.get_rank",
"torch.distributed.broadcast",
"numpy.log2",
"torch.distributed.is_initialized"
],
[
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.nn.functional.log_softmax",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.ReLU"
],
[
"pandas.read_csv",
"numpy.save"
],
[
"torch.equal",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
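Among the files above, examples/simultaneous_translation/utils/functions.py computes cumulative products in log space for numerical stability, plus an exclusive variant that prepends a 1 and drops the final product. A quick numerical check of both identities (values hold up to the eps=1e-10 additive smoothing; the snippet inlines the two formulas rather than importing fairseq):

import torch

x = torch.tensor([0.5, 0.5, 0.5])

# safe_cumprod computes exp(cumsum(log(x + eps)))
print(torch.exp(torch.cumsum(torch.log(x + 1e-10), dim=0)))
# tensor([0.5000, 0.2500, 0.1250])  ~= torch.cumprod(x, 0)

# exclusive_cumprod prepends a 1 and drops the last product:
print(torch.cumprod(torch.cat([torch.ones(1), x]), dim=0)[:-1])
# tensor([1.0000, 0.5000, 0.2500])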
rflieshman/BOLSTM | [
"c33a8b2a7722acb5e3ff55c3735591aea4f76f49"
] | [
"src/train_rnn.py"
] | [
"import random\nimport sys\nimport logging\nimport os\n\nimport collections\nimport numpy as np\n\nnp.random.seed(1)\nfrom tensorflow import set_random_seed\n\nset_random_seed(1)\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import model_from_json\nfrom keras.callbacks import Callback, LambdaCallback, ModelCheckpoint\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n\nlogging.basicConfig(level=logging.WARNING)\nimport matplotlib\n\nlogger = logging.getLogger(\"matplotlib\")\nlogger.setLevel(logging.WARNING)\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\n\nsess = tf.Session(config=config)\nset_session(sess) # set this TensorFlow session as the default session for Keras\n\nfrom chebi_path import load_chebi\nfrom models import (\n get_model,\n get_xu_model,\n embbed_size,\n max_sentence_length,\n max_ancestors_length,\n n_classes,\n)\n\n# words_channel, wordnet_channel, common_ancestors_channel, concat_ancestors_channel\n\n\nDATA_DIR = \"data/\"\nSSTLIGHT_DIR = \"sst-light-0.4/\"\nMODELS_DIR = \"models/\"\nn_epochs = 100\nbatch_size = 64\nvalidation_split = 0.4\nPRINTERRORS = False\n\n# https://github.com/keras-team/keras/issues/853#issuecomment-343981960\n\n\ndef write_plots(history, modelname):\n \"\"\"\n Write plots regarding model training\n :param history: history object returned by fit function\n :param modelname: name of model to be used as part of filename\n \"\"\"\n plt.figure()\n plt.plot(history.history[\"f1\"])\n plt.plot(history.history[\"val_f1\"])\n plt.title(\"model eval\")\n plt.ylabel(\"score\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(\"temp/{}_acc.png\".format(modelname))\n\n plt.figure()\n plt.plot(history.history[\"loss\"])\n plt.plot(history.history[\"val_loss\"])\n plt.title(\"model loss\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(\"temp/{}_loss.png\".format(modelname))\n\n\ndef get_glove_vectors(filename=\"glove.6B.300d.txt\"):\n \"\"\"\n Open\n :param filename: file containing the word vectors trained with glove\n :return: index of each word and vectors\n \"\"\"\n embeddings_vectors = {\"\": np.zeros(embbed_size, dtype=\"float32\")} # words -> vector\n embedding_indexes = {\"\": 0}\n # load embeddings indexes: word -> coefs\n f = open(os.path.join(DATA_DIR, filename))\n # f = open(os.path.join(DATA_DIR, 'PubMed-and-PMC-w2v.txt'))\n for i, line in enumerate(f):\n # if i == 0:\n # continue\n values = line.split()\n word = values[0].lower()\n # if word.isdigit():\n # continue\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_vectors[word] = coefs\n # print(word, coefs)\n embedding_indexes[word] = i + 1\n # print(i)\n print(len(embedding_indexes))\n f.close()\n print(\"Found %s word vectors.\" % len(embeddings_vectors))\n\n # assemble the embedding_weights in one numpy array\n # n_symbols = len(embedding_indexes) + 1 # adding 1 to account for 0th index (for masking)\n n_symbols = len(embedding_indexes) + 1\n embedding_weights = np.zeros((n_symbols, embbed_size))\n 
for word, index in embedding_indexes.items():\n # print(index, n_symbols)\n embedding_weights[index, :] = embeddings_vectors[word]\n\n return embedding_indexes, embedding_weights\n\n\ndef get_w2v(filename=\"{}/PubMed-w2v.bin\".format(DATA_DIR)):\n \"\"\"\n Open Word2Vec file using gensim package\n :return: word vectors in KeyedVectors gensim object\n \"\"\"\n # word_vectors = KeyedVectors.load_word2vec_format('data/PubMed-and-PMC-w2v.txt', binary=False) # C text format\n word_vectors = KeyedVectors.load_word2vec_format(\n filename, binary=True\n ) # C binary format\n return word_vectors\n\n\ndef get_wordnet_indexes():\n \"\"\"\n Get the wordnet classes considered by SST, ignoring BI tags\n :return: embedding_indexes: tag -> index\n \"\"\"\n embedding_indexes = {}\n with open(\"{}/DATA/WNSS_07.TAGSET\".format(SSTLIGHT_DIR), \"r\") as f:\n lines = f.readlines()\n i = 0\n for l in lines:\n if l.startswith(\"I-\"):\n continue\n embedding_indexes[l.strip().split(\"-\")[-1]] = i\n i += 1\n # print(embedding_indexes)\n return embedding_indexes\n\n\ndef preprocess_sequences_glove(x_data, embeddings_index):\n \"\"\"\n Replace words in x_data with index of word in embeddings_index and pad sequence\n :param x_data: list of sequences of words (sentences)\n :param embeddings_index: word -> index in embedding matrix\n :return: matrix to be used as training data\n \"\"\"\n data = []\n for i, seq in enumerate(x_data):\n # for w in seq:\n # if w.lower() not in embeddings_index:\n # print(\"word not in index: {}\".format(w.lower()))\n # print(seq)\n # idxs = [embeddings_index.get(w.lower()) for w in seq if w.lower() in embeddings_index]\n idxs = [embeddings_index.get(w) for w in seq if w in embeddings_index]\n # idxs = [embeddings_index.vocab[w.lower()].index for w in seq if w.lower() in embeddings_index.vocab]\n if None in idxs:\n print(seq, idxs)\n # print(idxs)\n data.append(idxs)\n # print(data)\n data = pad_sequences(data, maxlen=max_sentence_length)\n return data\n\n\ndef preprocess_sequences(x_data, embeddings_index):\n \"\"\"\n Replace words in x_data with index of word in embeddings_index and pad sequence\n :param x_data: list of sequences of words (sentences)\n :param embeddings_index: word -> index in embedding matrix\n :return: matrix to be used as training data\n \"\"\"\n data = []\n for i, seq in enumerate(x_data):\n # for w in seq:\n # if w.lower() not in embeddings_index:\n # print(\"word not in index: {}\".format(w.lower()))\n # print(seq)\n # idxs = [embeddings_index.get(w.lower()) for w in seq if w.lower() in embeddings_index]\n # idxs = [embeddings_index.vocab[w.lower()].index for w in seq if w.lower() in embeddings_index.vocab]\n idxs = []\n for w in seq:\n if w.lower() in embeddings_index.vocab:\n idxs.append(embeddings_index.vocab[w.lower()].index)\n if None in idxs:\n print(seq, idxs)\n # print(idxs)\n data.append(idxs)\n # print(data)\n data = pad_sequences(data, maxlen=max_sentence_length, padding=\"post\")\n return data\n\n\ndef preprocess_ids(x_data, id_to_index, maxlen):\n \"\"\"\n process a sequence of ontology:IDs, so an embedding index is not necessary\n :param x_data:\n :param id_to_index:\n :param maxlen:\n :return: matrix to be used as training data\n \"\"\"\n #\n data = []\n for i, seq in enumerate(x_data):\n # print(seq)\n idxs = [\n id_to_index[d.replace(\"_\", \":\")] for d in seq if d and d.startswith(\"CHEBI\")\n ]\n data.append(idxs)\n data = pad_sequences(data, maxlen=maxlen)\n return data\n\n\nclass Metrics(Callback):\n \"\"\"\n Implementation of P, R and F1 metrics 
for fit function callback\n \"\"\"\n\n def __init__(self, labels, words, n_inputs, **kwargs):\n self.labels = labels\n self.words_left = words[0]\n self.words_right = words[1]\n self.n_inputs = n_inputs\n self._val_f1 = 0\n self._val_recall = 0\n self._val_precision = 0\n super(Metrics, self).__init__()\n\n def on_train_begin(self, logs={}):\n self.val_f1s = []\n self.val_recalls = []\n self.val_precisions = []\n\n def on_epoch_end(self, epoch, logs={}):\n # print(dir(self.model))\n # print(len(self.validation_data))\n val_predict = (\n np.asarray(\n self.model.predict(\n [self.validation_data[i] for i in range(self.n_inputs)]\n )\n )\n ).round()\n val_targ = self.validation_data[self.n_inputs]\n # val_targ = self.validation_data[1]\n # probs = np.asarray(self.model.predict([self.validation_data[0], self.validation_data[1]],\n # ))\n self._val_f1 = f1_score(\n val_targ[..., 1:], val_predict[..., 1:], average=\"macro\"\n )\n self._val_recall = recall_score(\n val_targ[..., 1:], val_predict[..., 1:], average=\"macro\"\n )\n self._val_precision = precision_score(\n val_targ[..., 1:], val_predict[..., 1:], average=\"macro\"\n )\n _confusion_matrix = confusion_matrix(\n val_targ.argmax(axis=1), val_predict.argmax(axis=1)\n )\n self.val_f1s.append(self._val_f1)\n self.val_recalls.append(self._val_recall)\n self.val_precisions.append(self._val_precision)\n s = \"predicted not false: {}/{}\\n{}\\n\".format(\n len([x for x in val_predict if np.argmax(x) != 0]),\n len([x for x in val_targ if x[0] < 0.5]),\n _confusion_matrix,\n )\n print(\n \"\\n{} VAL_f1:{:6.3f} VAL_p:{:6.3f} VAL_r:{:6.3f}\\n\".format(\n s, self._val_f1, self._val_precision, self._val_recall\n )\n )\n\n return\n\n\ndef get_ddi_data(dirs, name_to_id, synonym_to_id, id_to_name):\n \"\"\"\n Generate data instances for the documents in the subdirectories of the corpus\n :param dirs: list of directories to be scanned for XML files\n :return: column vectors where each element corresponds to a label or a sequence of values of a particular\n data instance.\n \"\"\"\n\n # import function to process a directory with XML DDI files\n from parse_ddi import get_ddi_sdp_instances\n\n # dirs = [\"data/DDICorpus/Test/DDIExtraction/DrugBank/\", \"data/DDICorpus/Test/DDIExtraction/MedLine/\"]\n labels = []\n # instances = np.empty((0, max_sentence_length, embbed_size))\n # instances = np.empty((0, max_sentence_length))\n left_instances = [] # word indexes\n right_instances = []\n common_ancestors = [] # ontology IDs\n left_ancestors = []\n right_ancestors = []\n left_wordnet = [] # wordnet IDs\n right_wordnet = []\n all_pos_gv = set() # anti positive governors\n all_neg_gv = set()\n classes = np.empty((0,))\n\n for dir in dirs:\n if not os.path.isdir(dir):\n print(\"{} does not exist!\".format(dir))\n sys.exit()\n dir_labels, dir_instances, dir_classes, dir_common, dir_ancestors, dir_wordnet, neg_gv, pos_gv = get_ddi_sdp_instances(\n dir, name_to_id, synonym_to_id, id_to_name\n )\n # dir_instances = np.array(dir_instances)\n # print(dir_instances)\n # dir_instances = sequence.pad_sequences(dir_instances, maxlen=max_sentence_length)\n dir_classes = np.array(dir_classes)\n\n labels += dir_labels\n # print(instances.shape, dir_instances.shape)\n # instances = np.concatenate((instances, dir_instances), axis=0)\n left_instances += dir_instances[0]\n right_instances += dir_instances[1]\n common_ancestors += dir_common\n left_ancestors += dir_ancestors[0]\n right_ancestors += dir_ancestors[1]\n left_wordnet += 
dir_wordnet[0]\n right_wordnet += dir_wordnet[1]\n classes = np.concatenate((classes, dir_classes), axis=0)\n\n all_pos_gv.update(pos_gv)\n all_neg_gv.update(neg_gv)\n\n return (\n labels,\n (left_instances, right_instances),\n classes,\n common_ancestors,\n (left_ancestors, right_ancestors),\n (left_wordnet, right_wordnet),\n )\n\n\ndef prepare_inputs(channels, input_data, list_order, id_to_index):\n Y_train, train_labels, X_words_train, X_wordnet_train, X_subpaths_train, X_ancestors_train = (\n input_data\n )\n inputs = {}\n if \"words\" in channels:\n # emb_index, emb_matrix = get_glove_vectors()\n word_vectors = get_w2v()\n w2v_layer = word_vectors.get_keras_embedding(train_embeddings=False)\n X_words_left = preprocess_sequences(X_words_train[0], word_vectors)\n X_words_right = preprocess_sequences(X_words_train[1], word_vectors)\n del word_vectors\n inputs[\"left_words\"] = X_words_left[list_order]\n inputs[\"right_words\"] = X_words_right[list_order]\n else:\n emb_matrix = None\n w2v_layer = None\n\n if \"wordnet\" in channels:\n wn_index = get_wordnet_indexes()\n X_wn_left = preprocess_sequences_glove(X_wordnet_train[0], wn_index)\n X_wn_right = preprocess_sequences_glove(X_wordnet_train[1], wn_index)\n inputs[\"left_wordnet\"] = X_wn_left[list_order]\n inputs[\"right_wordnet\"] = X_wn_right[list_order]\n else:\n wn_index = None\n\n if \"concat_ancestors\" in channels or \"common_ancestors\" in channels:\n # is_a_graph, name_to_id, synonym_to_id, id_to_name, id_to_index = load_chebi()\n X_ids_left = preprocess_ids(\n X_subpaths_train[0], id_to_index, max_ancestors_length\n )\n X_ids_right = preprocess_ids(\n X_subpaths_train[1], id_to_index, max_ancestors_length\n )\n X_ancestors = preprocess_ids(\n X_ancestors_train, id_to_index, max_ancestors_length * 2\n )\n # X_ancestors_train = np.concatenate((X_ids_left, X_ids_right[..., 1:]), 1)\n inputs[\"left_ancestors\"] = X_ids_left[list_order]\n inputs[\"right_ancestors\"] = X_ids_right[list_order]\n inputs[\"common_ancestors\"] = X_ancestors[list_order]\n\n return inputs, w2v_layer, wn_index\n\n\ndef train(modelname, channels, train_inputs, id_to_index, test_inputs=None):\n # open numpy arrays with data and train model\n Y_train, train_labels, X_words_train, X_wordnet_train, X_subpaths_train, X_ancestors_train = (\n train_inputs\n )\n if test_inputs is not None:\n Y_test, test_labels, X_words_test, X_wordnet_test, X_subpaths_test, X_ancestors_test = (\n test_inputs\n )\n # number of input channels is determined by args after corpus_name and model_name\n n_inputs = 0\n if \"words\" in channels:\n n_inputs += 2\n if \"wordnet\" in channels:\n n_inputs += 2\n if \"concat_ancestors\" in channels:\n n_inputs += 2\n if \"common_ancestors\" in channels:\n n_inputs += 1\n\n # remove previous model files\n if os.path.isfile(\"{}/{}.json\".format(MODELS_DIR, modelname)):\n os.remove(\"{}/{}.json\".format(MODELS_DIR, modelname))\n if os.path.isfile(\"{}/{}.h5\".format(MODELS_DIR, modelname)):\n os.remove(\"{}/{}.h5\".format(MODELS_DIR, modelname))\n\n # print(train_labels)\n Y_train = to_categorical(Y_train, num_classes=n_classes)\n\n # get random instance order (to shuffle docs types and labels)\n list_order = np.arange(len(Y_train))\n # print(list_order)\n random.seed(1)\n random.shuffle(list_order)\n Y_train = Y_train[list_order]\n train_labels = train_labels[list_order]\n print(\"train order:\", list_order)\n\n # store features in this dictionary according to args\n\n inputs, w2v_layer, wn_index = prepare_inputs(\n channels, train_inputs, 
list_order, id_to_index\n )\n\n if test_inputs is not None:\n Y_test = to_categorical(Y_test, num_classes=n_classes)\n test_list_order = np.arange(len(Y_test))\n test_inputs, w2v_layer_test, wn_index_test = prepare_inputs(\n channels, test_inputs, test_list_order, id_to_index\n )\n\n model = get_model(w2v_layer, channels, wn_index, id_to_index)\n del id_to_index\n del wn_index\n # serialize model to JSON\n model_json = model.to_json()\n with open(\"{}/{}.json\".format(MODELS_DIR, modelname), \"w\") as json_file:\n json_file.write(model_json)\n # alternative models\n # model = get_words_model(emb_matrix)\n # model = get_xu_model(emb_matrix)\n\n metrics = Metrics(train_labels, X_words_train, n_inputs)\n checkpointer = ModelCheckpoint(\n filepath=\"{}/{}.h5\".format(MODELS_DIR, modelname),\n verbose=1,\n save_best_only=True,\n )\n\n if test_inputs is None:\n history = model.fit(\n inputs,\n {\"output\": Y_train},\n validation_split=validation_split,\n epochs=n_epochs,\n batch_size=batch_size,\n verbose=2,\n callbacks=[metrics, checkpointer],\n )\n else:\n history = model.fit(\n inputs,\n {\"output\": Y_train},\n validation_data=(test_inputs, Y_test),\n epochs=n_epochs,\n batch_size=batch_size,\n verbose=2,\n callbacks=[metrics, checkpointer],\n )\n\n # keras.callbacks.EarlyStopping(patience=3)])\n write_plots(history, modelname)\n\n # serialize weights to HDF5 - weights are saved using checkpointer\n # model.save_weights(\"{}.h5\".format(sys.argv[3]))\n print(\"Saved model to disk\")\n\n\ndef predict(\n modelname,\n corpusname,\n outputpath,\n channels,\n test_labels,\n X_words_test,\n X_wn_test,\n X_subpaths_test,\n X_ancestors_test,\n id_to_index,\n):\n inputs = {}\n print(channels)\n if \"words\" in channels:\n # emb_index, emb_matrix = get_glove_vectors()\n # emb_index, emb_matrix = None, None\n word_vectors = get_w2v()\n X_words_test_left = preprocess_sequences(\n [[\"drug\"] + x[1:] for x in X_words_test[0]], word_vectors\n )\n X_words_test_right = preprocess_sequences(\n [x[:-1] + [\"drug\"] for x in X_words_test[1]], word_vectors\n )\n del word_vectors\n inputs[\"left_words\"] = X_words_test_left\n inputs[\"right_words\"] = X_words_test_right\n if \"wordnet\" in channels:\n wn_index = get_wordnet_indexes()\n X_wordnet_test_left = preprocess_sequences_glove(X_wn_test[0], wn_index)\n X_wordnet_test_right = preprocess_sequences_glove(X_wn_test[1], wn_index)\n del wn_index\n inputs[\"left_wordnet\"] = X_wordnet_test_left\n inputs[\"right_wordnet\"] = X_wordnet_test_right\n\n if \"common_ancestors\" in channels or \"concat_ancestors\" in channels:\n X_ids_left = preprocess_ids(\n X_subpaths_test[0], id_to_index, max_ancestors_length\n )\n X_ids_right = preprocess_ids(\n X_subpaths_test[1], id_to_index, max_ancestors_length\n )\n X_ancestors = preprocess_ids(\n X_ancestors_test, id_to_index, max_ancestors_length * 2\n )\n del id_to_index\n inputs[\"left_ancestors\"] = X_ids_left\n inputs[\"right_ancestors\"] = X_ids_right\n inputs[\"common_ancestors\"] = X_ancestors\n\n # load json and create model\n json_file = open(\"{}/{}.json\".format(MODELS_DIR, modelname), \"r\")\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(\"{}/{}.h5\".format(MODELS_DIR, modelname))\n print(\"Loaded model {}/{} from disk\".format(MODELS_DIR, modelname))\n\n # test_labels = np.load(sys.argv[2] + \"_labels.npy\")\n\n # scores = loaded_model.predict(X_words_test)\n scores = 
loaded_model.predict(inputs)\n # write results to file\n # assuming DDI results\n from parse_ddi import pairtype_tolabel\n\n if outputpath.endswith(\"/\"):\n outputpath = outputpath[:-1]\n if not os.path.exists(outputpath):\n print(\"created output path:\", outputpath)\n os.makedirs(outputpath, exist_ok=False)\n with open(\n \"{}/{}_{}results.txt\".format(\n outputpath, modelname, corpusname.replace(\"/\", \".\")\n ),\n \"w\",\n ) as f:\n f.write(\"\\t\".join([\"entity1\", \"entity2\", \"pred_class\\n\"]))\n for i, pair in enumerate(test_labels):\n f.write(\n \"\\t\".join(\n (pair[0], pair[1], pairtype_tolabel[(np.argmax(scores[i]))] + \"\\n\")\n )\n )\n\n\ndef main():\n if sys.argv[1].startswith(\"preprocessing\"):\n # generate data instances and write to disk as numpy arrays\n # args: corpus_type (semeval8 or ddi) corpus_name1 (corpus_name2) (...)\n # e.g. python3 src/train_rnn.py preprocessing ddi temp/dditrain data/DDICorpus/Train/MedLine/ data/DDICorpus/Train/DrugBank/\n train_data = True\n if \"test\" in sys.argv[3].lower():\n train_data = False\n # TODO: generalize text pre-processing\n if sys.argv[2] == \"semeval8\":\n from parse_semeval8 import get_semeval8_sdp_instances\n\n train_labels, X_train, classes, X_train_ancestors, X_train_wordnet = get_semeval8_sdp_instances(\n sys.argv[4:], train=train_data\n )\n print(len(X_train))\n print(len(X_train[0]))\n elif sys.argv[2] == \"ddi\":\n is_a_graph, name_to_id, synonym_to_id, id_to_name, id_to_index = load_chebi(\n \"{}/chebi.obo\".format(DATA_DIR)\n )\n train_labels, X_train, Y_train, X_train_ancestors, X_train_subpaths, X_train_wordnet = get_ddi_data(\n [sys.argv[4]], name_to_id, synonym_to_id, id_to_name\n )\n del id_to_name\n del synonym_to_id\n del name_to_id\n del is_a_graph\n # print(len(X_train))\n\n if sys.argv[1].endswith(\"_train\"):\n train_labels = np.array(train_labels)\n train(\n sys.argv[3],\n sys.argv[5:],\n (\n Y_train,\n train_labels,\n X_train,\n X_train_wordnet,\n X_train_subpaths,\n X_train_ancestors,\n ),\n id_to_index,\n )\n elif sys.argv[1].endswith(\"_predict\"):\n train_labels = np.array(train_labels)\n predict(\n sys.argv[3],\n sys.argv[4],\n sys.argv[5],\n sys.argv[6:],\n train_labels,\n X_train,\n X_train_wordnet,\n X_train_subpaths,\n X_train_ancestors,\n id_to_index,\n )\n else:\n if sys.argv[2] == \"ddi\":\n np.save(sys.argv[3] + \"_x_ancestors.npy\", X_train_ancestors)\n np.save(sys.argv[3] + \"_x_subpaths.npy\", X_train_subpaths)\n np.save(sys.argv[3] + \"_labels.npy\", train_labels)\n np.save(sys.argv[3] + \"_x_words.npy\", X_train)\n np.save(sys.argv[3] + \"_x_wordnet.npy\", X_train_wordnet)\n np.save(sys.argv[3] + \"_y.npy\", Y_train)\n\n elif sys.argv[1] == \"train\":\n is_a_graph, name_to_id, synonym_to_id, id_to_name, id_to_index = load_chebi(\n \"{}/chebi.obo\".format(DATA_DIR)\n )\n train_labels = np.load(sys.argv[2] + \"_labels.npy\")\n Y_train = np.load(sys.argv[2] + \"_y.npy\")\n # Y_train = to_categorical(Y_train, num_classes=n_classes)\n X_words_train = None\n X_wordnet_train = None\n X_subpaths_train = None\n X_ancestors_train = None\n if \"words\" in sys.argv[4:]:\n X_words_train = np.load(sys.argv[2] + \"_x_words.npy\")\n if \"wordnet\" in sys.argv[4:]:\n X_wordnet_train = np.load(sys.argv[2] + \"_x_wordnet.npy\")\n if \"concat_ancestors\" in sys.argv[4:] or \"common_ancestors\" in sys.argv[4:]:\n X_subpaths_train = np.load(sys.argv[2] + \"_x_subpaths.npy\")\n X_ancestors_train = np.load(sys.argv[2] + \"_x_ancestors.npy\")\n\n train_inputs = (\n Y_train,\n train_labels,\n 
X_words_train,\n X_wordnet_train,\n X_subpaths_train,\n X_ancestors_train,\n )\n train(sys.argv[3], sys.argv[4:], train_inputs, id_to_index)\n\n elif sys.argv[1] == \"train_test\":\n is_a_graph, name_to_id, synonym_to_id, id_to_name, id_to_index = load_chebi(\n \"{}/chebi.obo\".format(DATA_DIR)\n )\n\n train_labels = np.load(sys.argv[2] + \"_labels.npy\")\n test_labels = np.load(sys.argv[3] + \"_labels.npy\")\n Y_train = np.load(sys.argv[2] + \"_y.npy\")\n Y_test = np.load(sys.argv[3] + \"_y.npy\")\n # Y_train = to_categorical(Y_train, num_classes=n_classes)\n X_words_train = None\n X_wordnet_train = None\n X_subpaths_train = None\n X_ancestors_train = None\n\n X_words_test = None\n X_wordnet_test = None\n X_subpaths_test = None\n X_ancestors_test = None\n\n if \"words\" in sys.argv[5:]:\n X_words_train = np.load(sys.argv[2] + \"_x_words.npy\")\n X_words_test = np.load(sys.argv[3] + \"_x_words.npy\")\n if \"wordnet\" in sys.argv[5:]:\n X_wordnet_train = np.load(sys.argv[2] + \"_x_wordnet.npy\")\n X_wordnet_test = np.load(sys.argv[3] + \"_x_wordnet.npy\")\n if \"concat_ancestors\" in sys.argv[5:] or \"common_ancestors\" in sys.argv[5:]:\n X_subpaths_train = np.load(sys.argv[2] + \"_x_subpaths.npy\")\n X_ancestors_train = np.load(sys.argv[2] + \"_x_ancestors.npy\")\n X_subpaths_test = np.load(sys.argv[3] + \"_x_subpaths.npy\")\n X_ancestors_test = np.load(sys.argv[3] + \"_x_ancestors.npy\")\n\n train(\n sys.argv[4],\n sys.argv[5:],\n (\n Y_train,\n train_labels,\n X_words_train,\n X_wordnet_train,\n X_subpaths_train,\n X_ancestors_train,\n ),\n id_to_index,\n (\n Y_test,\n test_labels,\n X_words_test,\n X_wordnet_test,\n X_subpaths_test,\n X_ancestors_test,\n ),\n )\n\n elif sys.argv[1] == \"predict\":\n # open numpy files according to the input channels specified, open model files and apply model to data\n is_a_graph, name_to_id, synonym_to_id, id_to_name, id_to_index = load_chebi(\n \"{}/chebi.obo\".format(DATA_DIR)\n )\n X_words_test = None\n X_wn_test = None\n X_subpaths_test = None\n X_ancestors_test = None\n if \"words\" in sys.argv[4:]:\n X_words_test = np.load(sys.argv[2] + \"_x_words.npy\")\n if \"wordnet\" in sys.argv[4:]:\n X_wn_test = np.load(sys.argv[2] + \"_x_wordnet.npy\")\n if \"common_ancestors\" in sys.argv[4:] or \"concat_ancestors\" in sys.argv[4:]:\n X_ancestors_test = np.load(sys.argv[2] + \"_x_ancestors.npy\")\n X_subpaths_test = np.load(sys.argv[2] + \"_x_subpaths.npy\")\n test_labels = np.load(sys.argv[2] + \"_labels.npy\")\n predict(\n sys.argv[3],\n sys.argv[2],\n \"results/\",\n sys.argv[4:],\n test_labels,\n X_words_test,\n X_wn_test,\n X_subpaths_test,\n X_ancestors_test,\n id_to_index,\n )\n\n elif sys.argv[1] == \"dummy_predict\":\n test_labels = np.load(sys.argv[2] + \"_labels.npy\")\n with open(\"{}_results.txt\".format(sys.argv[2].split(\"/\")[-1]), \"w\") as f:\n for i, pair in enumerate(test_labels):\n f.write(\" \".join((pair[0], pair[1], \"3\")) + \"\\n\")\n\n elif sys.argv[1] == \"showdata\":\n if sys.argv[3].isdigit(): # limit this number of instances\n limit = int(sys.argv[3])\n target = None\n else:\n target = sys.argv[3] # print instances with this entity\n limit = None\n X_words_train = np.load(sys.argv[2] + \"_x_words.npy\")\n X_ancestors_train = np.load(sys.argv[2] + \"_x_ancestors.npy\")\n X_subpaths_train = np.load(sys.argv[2] + \"_x_subpaths.npy\")\n X_wordnet_train = np.load(sys.argv[2] + \"_x_wordnet.npy\")\n Y_train = np.load(sys.argv[2] + \"_y.npy\")\n train_labels = np.load(sys.argv[2] + \"_labels.npy\")\n\n if limit:\n print(\"labels:\")\n print(train_labels[:limit])\n print()\n print(\"left words:\")\n print(X_words_train[0][:limit])\n print(\"right 
words:\")\n print(X_words_train[1][:limit])\n print()\n print(\"chebi ancestors:\")\n print(len(X_subpaths_train))\n print(len(X_ancestors_train[0]))\n print(X_ancestors_train[:limit])\n print()\n print(\"chebi subpaths\")\n print(\"left\")\n print(X_subpaths_train[0][:limit])\n print(\"right\")\n print(X_subpaths_train[1][:limit])\n print()\n\n print(\"wordnet:\")\n print(X_wordnet_train[0][:limit])\n print(X_wordnet_train[1][:limit])\n print()\n\n print(\"classes\")\n print(Y_train[:limit])\n analyze_entity_distances(train_labels, Y_train, X_words_train)\n print(\"class distribution\")\n counter = collections.Counter(Y_train)\n print(counter)\n print(counter[1] + counter[2] + counter[3] + counter[4])\n # analyze_sdps(Y_train, X_words_train)\n # print([(X_words_train[0][i], X_words_train[1][i]) for i, l in enumerate(train_labels) if 'DDI-DrugBank.d769.s2.e1' in l])\n # analyze_lens(Y_train, X_words_train, X_wordnet_train, X_subpaths_train, X_ancestors_train)\n\n else:\n train_labels = [(i, t) for i, t in enumerate(train_labels) if target in t]\n for (i, l) in train_labels:\n print()\n print()\n print(l)\n print(\"left words:\")\n print(X_words_train[0][i])\n print(\"right words:\")\n print(X_words_train[1][i])\n # print()\n print(\"classes\")\n print(Y_train[i])\n print(\"wordnet:\")\n print(X_wordnet_train[0][i])\n print(X_wordnet_train[1][i])\n print()\n print(\"chebi ancestors:\")\n # print(len(X_subpaths_train))\n # print(len(X_ancestors_train[0]))\n print(X_ancestors_train[i], len(X_ancestors_train[i]))\n print()\n print(\"chebi subpaths\")\n print(\"left\")\n print(X_subpaths_train[0][i], len(X_subpaths_train[0][i]))\n print(\"right\")\n print(X_subpaths_train[1][i], len(X_subpaths_train[1][i]))\n print()\n\n\ndef analyze_sdps(Y_train, X_words_train):\n pos_sdps = {}\n neg_sdps = {}\n for i, p in enumerate(Y_train):\n sdp_len = len(X_words_train[0][i]) + len(X_words_train[1][i])\n if p != 0:\n pos_sdps[sdp_len] = pos_sdps.get(sdp_len, 0) + 1\n else:\n neg_sdps[sdp_len] = neg_sdps.get(sdp_len, 0) + 1\n # print(\"positive SDPs with length shorter than {}: {}\".format(threshold, pos_short_sdps))\n # print(short_sdps/len([y for y in Y_train if y != 0]))\n # print(\"negative SDPs with length shorter than {}: {}\".format(threshold, short_sdps - pos_short_sdps))\n # print((short_sdps - pos_short_sdps) / len([y for y in Y_train if y == 0]))\n print(\"pos sdps:\", sum(pos_sdps.values()))\n od = collections.OrderedDict(sorted(pos_sdps.items()))\n for k, v in od.items():\n print(\n k,\n v,\n round(v / sum(pos_sdps.values()), 3),\n round(v / (v + neg_sdps.get(k, 0)), 3),\n )\n print()\n print(\"neg sdps:\", sum(neg_sdps.values()))\n od = collections.OrderedDict(sorted(neg_sdps.items()))\n for k, v in od.items():\n print(\n k,\n v,\n round(v / sum(neg_sdps.values()), 3),\n round(v / (v + pos_sdps.get(k, 0)), 3),\n )\n print()\n\n\ndef analyze_entity_distances(train_labels, Y_train, X_words_train):\n entity_id_max = 10\n print()\n print(\"entity id distribution of entities in positive pairs\")\n c = {}\n for i, p in enumerate(train_labels):\n if Y_train[i] == 0:\n continue\n eid1 = int(p[0].split(\"e\")[-1])\n eid2 = int(p[1].split(\"e\")[-1])\n if eid1 not in c:\n c[eid1] = 0\n if eid2 not in c:\n c[eid2] = 0\n c[eid1] += 1\n c[eid2] += 2\n for e in c:\n print(e, c[e], c[e] / sum(c.values()))\n print(\"percentage of entities with id>{}\".format(entity_id_max))\n print(sum([c[e] / sum(c.values()) for e in c if e > entity_id_max]))\n print()\n\n print()\n print(\"entity id distribution of entities 
in all pairs\")\n c = {}\n for i, p in enumerate(train_labels):\n eid1 = int(p[0].split(\"e\")[-1])\n eid2 = int(p[1].split(\"e\")[-1])\n if eid1 not in c:\n c[eid1] = 0\n if eid2 not in c:\n c[eid2] = 0\n c[eid1] += 1\n c[eid2] += 2\n for e in c:\n print(e, c[e], c[e] / sum(c.values()))\n print(\"percentage of entities with id>{}\".format(entity_id_max))\n print(sum([c[e] / sum(c.values()) for e in c if e > entity_id_max]))\n print()\n\n digits1 = [\n x for x in X_words_train[0] if any([y.replace(\".\", \"\").isdigit() for y in x])\n ]\n digits2 = [\n x for x in X_words_train[1] if any([y.replace(\".\", \"\").isdigit() for y in x])\n ]\n print(digits1, digits2)\n print(len(digits1), len(digits2))\n print(len(X_words_train[0]), len(X_words_train[1]))\n\n\ndef analyze_lens(\n Y_train, X_words_train, X_wordnet_train, X_subpaths_train, X_ancestors_train\n):\n pos = 0\n neg = 0\n pos_word_left = 0\n pos_word_right = 0\n neg_word_left = 0\n neg_word_right = 0\n pos_wordnet_left = 0\n pos_wordnet_right = 0\n neg_wordnet_left = 0\n neg_wordnet_right = 0\n pos_chebi_left = 0\n pos_chebi_right = 0\n neg_chebi_left = 0\n neg_chebi_right = 0\n pos_common = 0\n neg_common = 0\n\n for i, y in enumerate(Y_train):\n if y == 0:\n neg += 1\n neg_word_left += len(X_words_train[0][i])\n neg_word_right += len(X_words_train[1][i])\n neg_wordnet_left += len(X_wordnet_train[0][i])\n neg_wordnet_right += len(X_wordnet_train[1][i])\n neg_chebi_left += len(X_subpaths_train[0][i])\n neg_chebi_right += len(X_subpaths_train[1][i])\n neg_common += len(X_ancestors_train[i])\n else:\n pos_word_left += len(X_words_train[0][i])\n pos_word_right += len(X_words_train[1][i])\n pos_wordnet_left += len(X_wordnet_train[0][i])\n pos_wordnet_right += len(X_wordnet_train[1][i])\n pos_chebi_left += len(X_subpaths_train[0][i])\n pos_chebi_right += len(X_subpaths_train[1][i])\n pos_common += len(X_ancestors_train[i])\n pos += 1\n pos_values = [\n pos_word_left,\n pos_word_right,\n pos_wordnet_left,\n pos_wordnet_right,\n pos_chebi_left,\n pos_chebi_right,\n pos_common,\n ]\n print(\"positive pairs\\n: {}\".format(\"\\n\".join([str(x / pos) for x in pos_values])))\n\n neg_values = [\n neg_word_left,\n neg_word_right,\n neg_wordnet_left,\n neg_wordnet_right,\n neg_chebi_left,\n neg_chebi_right,\n neg_common,\n ]\n print(\"negative pairs\\n: {}\".format(\"\\n\".join([str(x / neg) for x in neg_values])))\n\n\nif __name__ == \"__main__\":\n main()\n"
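The Metrics callback in the script above is its evaluation core: predictions are rounded to one-hot form and macro precision/recall/F1 are computed over every column except index 0, so the dominant negative ("no interaction") class cannot inflate the scores. A minimal standalone sketch of that computation, assuming one-hot numpy arrays and scikit-learn only; the toy arrays and 4-class setup are illustrative, not from the source:

import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

def macro_scores_excluding_negative(y_true, y_pred):
    # Macro-averaged P/R/F1 over all classes except column 0 (the negative
    # class), mirroring the val_targ[..., 1:] / val_predict[..., 1:] slicing.
    return (
        precision_score(y_true[:, 1:], y_pred[:, 1:], average="macro"),
        recall_score(y_true[:, 1:], y_pred[:, 1:], average="macro"),
        f1_score(y_true[:, 1:], y_pred[:, 1:], average="macro"),
    )

# Illustrative one-hot targets and rounded predictions for 4 classes.
y_true = np.eye(4)[[0, 1, 2, 3, 0]]
y_pred = np.eye(4)[[0, 1, 2, 3, 2]]
print(macro_scores_excluding_negative(y_true, y_pred))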
] | [
[
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.metrics.f1_score",
"numpy.save",
"tensorflow.ConfigProto",
"numpy.argmax",
"tensorflow.Session",
"numpy.load",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"sklearn.metrics.precision_score",
"tensorflow.set_random_seed",
"numpy.array",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"numpy.empty"
]
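The api list above (numpy.zeros, numpy.asarray, and the rest) is dominated by the embedding-matrix assembly in get_glove_vectors. For context, an (index, weight-matrix) pair like the one that function returns is typically wired into a frozen Keras Embedding layer, analogous to the gensim get_keras_embedding(train_embeddings=False) call used for the word channel. A hedged sketch with illustrative sizes; none of these variable names come from the source:

import numpy as np
from keras.layers import Embedding

vocab_size, embed_dim, max_len = 10000, 300, 50  # illustrative sizes
glove_weights = np.random.rand(vocab_size, embed_dim)  # stand-in for the assembled matrix

embedding_layer = Embedding(
    input_dim=vocab_size,
    output_dim=embed_dim,
    weights=[glove_weights],  # preload the pretrained vectors
    input_length=max_len,     # matches the padded sequence length
    trainable=False,          # keep pretrained vectors frozen
)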
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
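The possible_versions entry above pins tensorflow to the 1.x line, which is consistent with the script's use of tf.ConfigProto, tf.Session, and top-level set_random_seed, all of which left the default namespace in TF 2.x. If the script ever needed to run on a TF 2 install, the same GPU-memory setup could likely be expressed through the compatibility layer; a sketch under that assumption, untested against the original code:

# Equivalent session setup via the TF1 compatibility layer (assumption:
# a tf.compat.v1-capable TensorFlow 2 install is available).
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restore TF1 graph-mode semantics
tf.set_random_seed(1)     # same seed the original script sets at import time

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)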
voxilady/tensorflow | [
"ba730a4f6de09ab8635091517933462dc70e4443"
] | [
"tensorflow/python/ops/parallel_for/control_flow_ops_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for pfor and for_loop.\"\"\"\n# pylint: disable=g-direct-tensorflow-import\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport time\n\nfrom absl import flags\nimport numpy as np\n\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.example import feature_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import bitwise_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import gradients as gradient_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops\nfrom tensorflow.python.ops.parallel_for.test_util import PForTestCase\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import nest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PForTest(PForTestCase):\n\n def test_op_conversion_fallback_to_while_loop(self):\n # Note that we used top_k op for this test. 
If a converter gets defined for\n # it, we will need to find another op for which a converter has not been\n # defined.\n x = random_ops.random_uniform([3, 2, 4])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n return nn.top_k(x_i)\n\n with self.assertRaisesRegexp(ValueError, \"No converter defined\"):\n self._test_loop_fn(\n loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])\n flags.FLAGS.op_conversion_fallback_to_while_loop = True\n self._test_loop_fn(\n loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])\n flags.FLAGS.op_conversion_fallback_to_while_loop = False\n\n def test_parallel_iterations(self):\n for parallel_iterations in [2, 3, 8, 10]:\n x = random_ops.random_uniform([8, 3])\n\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n return array_ops.gather(x, i)\n # pylint: enable=cell-var-from-loop\n\n self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)\n self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),\n parallel_iterations=parallel_iterations)\n\n def test_parallel_iterations_zero(self):\n with self.assertRaisesRegexp(ValueError, \"positive integer\"):\n pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)\n with self.assertRaisesRegexp(TypeError, \"positive integer\"):\n pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,\n parallel_iterations=0)\n\n def test_parallel_iterations_one(self):\n with self.assertRaisesRegexp(ValueError, \"Use for_loop instead\"):\n pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)\n\n def test_vectorized_map(self):\n def compute(x):\n return math_ops.reduce_mean(x, axis=0, keepdims=True)\n result = pfor_control_flow_ops.vectorized_map(\n compute, array_ops.ones((10, 5, 3)))\n self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass IndexedSlicesTest(PForTestCase):\n\n def test_indexed_slices(self):\n\n def loop_fn(i):\n return indexed_slices.IndexedSlices(\n indices=i,\n values=array_ops.reshape(i, [1]),\n dense_shape=[3, 1])\n\n self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.int32])\n\n def test_indexed_slices_components(self):\n\n def loop_fn(i):\n slices = indexed_slices.IndexedSlices(\n indices=i,\n values=array_ops.reshape(i, [1]),\n dense_shape=[3, 1])\n # Note that returning the components inside the slice avoids\n # densification, which may be more efficient.\n return slices.values, slices.indices\n\n self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.int32] * 2)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ReductionTest(PForTestCase):\n\n def test_reduce_concat(self):\n x = random_ops.random_uniform([8, 3])\n\n def loop_fn(i, pfor_config):\n x_i = array_ops.gather(x, i)\n vectorized_value = pfor_config.reduce_concat(x_i)\n mean_value = math_ops.reduce_mean(vectorized_value, axis=0)\n return x_i - mean_value\n\n output = pfor_control_flow_ops.pfor(loop_fn, 8)\n ans = x - math_ops.reduce_mean(x, axis=0)\n output_val, ans_val = self.evaluate([output, ans])\n self.assertAllClose(ans_val, output_val)\n\n def test_reduce_mean(self):\n x = random_ops.random_uniform([8, 3])\n\n def loop_fn(i, pfor_config):\n x_i = array_ops.gather(x, i)\n return x_i - pfor_config.reduce_mean(x_i)\n\n output = pfor_control_flow_ops.pfor(loop_fn, 8)\n ans = x - math_ops.reduce_mean(x, axis=0)\n output_val, ans_val = self.evaluate([output, ans])\n self.assertAllClose(ans_val, output_val)\n\n def test_reduce_sum(self):\n x = random_ops.random_uniform([8, 3])\n\n def loop_fn(i, pfor_config):\n x_i = 
array_ops.gather(x, i)\n return x_i - pfor_config.reduce_sum(x_i)\n\n output = pfor_control_flow_ops.pfor(loop_fn, 8)\n ans = x - math_ops.reduce_sum(x, axis=0)\n output_val, ans_val = self.evaluate([output, ans])\n self.assertAllClose(ans_val, output_val)\n\n def test_reduce_class(self):\n x = random_ops.random_uniform([8, 3])\n\n class LoopFn(object):\n\n def __init__(self):\n pass\n\n def __call__(self, i, pfor_config):\n x_i = array_ops.gather(x, i)\n return x_i - pfor_config.reduce_mean(x_i)\n\n output = pfor_control_flow_ops.pfor(LoopFn(), 8)\n ans = x - math_ops.reduce_mean(x, axis=0)\n output_val, ans_val = self.evaluate([output, ans])\n self.assertAllClose(ans_val, output_val)\n\n def test_reduce_functools_partial(self):\n x = random_ops.random_uniform([8, 3])\n\n def fn(i, pfor_config, dummy=None):\n del dummy\n x_i = array_ops.gather(x, i)\n return x_i - pfor_config.reduce_mean(x_i)\n\n loop_fn = functools.partial(fn, dummy=1)\n output = pfor_control_flow_ops.pfor(loop_fn, 8)\n ans = x - math_ops.reduce_mean(x, axis=0)\n output_val, ans_val = self.evaluate([output, ans])\n self.assertAllClose(ans_val, output_val)\n\n def test_parallel_iterations(self):\n x = random_ops.random_uniform([8, 3])\n\n def loop_fn(i, pfor_config):\n x_i = array_ops.gather(x, i)\n return pfor_config.reduce_sum(x_i)\n\n with self.assertRaisesRegexp(\n ValueError, \"parallel_iterations currently unsupported\"):\n pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass BitwiseTest(PForTestCase):\n\n def test_unary_cwise(self):\n for op in [bitwise_ops.invert]:\n x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)\n\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return op(x1)\n # pylint: enable=cell-var-from-loop\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n def test_binary_cwise(self):\n binary_ops = [\n bitwise_ops.bitwise_and,\n bitwise_ops.bitwise_or,\n bitwise_ops.bitwise_xor,\n bitwise_ops.left_shift,\n bitwise_ops.right_shift,\n ]\n for op in binary_ops:\n x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)\n y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)\n\n output_dtypes = []\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n y1 = array_ops.gather(y, i)\n outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]\n del output_dtypes[:]\n output_dtypes.extend([t.dtype for t in outputs])\n return outputs\n # pylint: enable=cell-var-from-loop\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NNTest(PForTestCase):\n\n def test_conv2d(self):\n x = random_ops.random_uniform([3, 2, 12, 12, 3])\n filt = random_ops.random_uniform([3, 3, 3, 7])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return nn.conv2d(\n x1, filt, strides=[1, 2, 2, 1], padding=\"VALID\", data_format=\"NHWC\")\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_conv2d_backprop_input(self):\n x_shape = [2, 12, 12, 3]\n filt = random_ops.random_uniform([3, 3, 3, 7])\n grad = random_ops.random_uniform([3, 2, 5, 5, 7])\n\n def loop_fn(i):\n grad1 = array_ops.gather(grad, i)\n return nn.conv2d_backprop_input(\n x_shape,\n filt,\n grad1,\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n data_format=\"NHWC\")\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_conv2d_backprop_filter(self):\n x = random_ops.random_uniform([3, 2, 12, 12, 3])\n x_0 = 
array_ops.gather(x, 0)\n filter_sizes = [3, 3, 3, 7]\n grad = random_ops.random_uniform([3, 2, 5, 5, 7])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n grad_i = array_ops.gather(grad, i)\n return [\n nn.conv2d_backprop_filter(\n inp,\n filter_sizes,\n grad_i,\n strides=[1, 2, 2, 1],\n padding=\"VALID\",\n data_format=\"NHWC\") for inp in [x_i, x_0]\n ]\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)\n\n def test_avg_pool(self):\n with backprop.GradientTape(persistent=True) as g:\n x = random_ops.random_uniform([3, 2, 12, 12, 3])\n g.watch(x)\n ksize = [1, 3, 3, 1]\n\n def loop_fn(i):\n with g:\n x1 = array_ops.gather(x, i)\n output = nn.avg_pool(\n x1, ksize, strides=[1, 2, 2, 1], padding=\"VALID\",\n data_format=\"NHWC\")\n loss = nn.l2_loss(output)\n return output, g.gradient(loss, x1)\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)\n\n def test_max_pool(self):\n with backprop.GradientTape(persistent=True) as g:\n x = random_ops.random_uniform([3, 2, 12, 12, 3])\n g.watch(x)\n ksize = [1, 3, 3, 1]\n strides = [1, 2, 2, 1]\n\n def loop_fn(i):\n with g:\n x1 = array_ops.gather(x, i)\n output = nn.max_pool(\n x1, ksize, strides=strides, padding=\"VALID\", data_format=\"NHWC\")\n loss = nn.l2_loss(output)\n ones = array_ops.ones_like(output)\n g.watch(ones)\n grad = g.gradient(loss, x1, output_gradients=ones)\n grad_grad = g.gradient(grad, ones)\n return output, grad, grad_grad\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)\n\n def test_max_pool3d(self):\n with backprop.GradientTape(persistent=True) as g:\n x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])\n g.watch(x)\n ksize = [1, 1, 3, 3, 1]\n strides = [1, 1, 2, 2, 1]\n\n def loop_fn(i):\n with g:\n x1 = array_ops.gather(x, i)\n output = nn.max_pool3d(\n x1, ksize, strides=strides, padding=\"VALID\", data_format=\"NDHWC\")\n loss = nn.l2_loss(output)\n ones = array_ops.ones_like(output)\n g.watch(ones)\n grad = g.gradient(loss, x1, output_gradients=ones)\n grad_grad = g.gradient(grad, ones)\n return output, grad, grad_grad\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)\n\n def test_fused_batch_norm(self):\n with compat.forward_compatibility_horizon(2019, 6, 7):\n data_formats = [\"NHWC\"]\n if test.is_gpu_available():\n data_formats.append(\"NCHW\")\n for is_training in (True, False):\n for data_format in data_formats:\n with backprop.GradientTape(persistent=True) as g:\n if data_format == \"NCHW\":\n x = random_ops.random_uniform([3, 1, 2, 5, 5])\n else:\n x = random_ops.random_uniform([3, 1, 5, 5, 2])\n g.watch(x)\n scale = random_ops.random_uniform([2])\n g.watch(scale)\n offset = random_ops.random_uniform([2])\n g.watch(offset)\n mean = None if is_training else random_ops.random_uniform([2])\n variance = None if is_training else random_ops.random_uniform([2])\n\n # pylint: disable=cell-var-from-loop\n def loop_fn(i):\n with g:\n x1 = array_ops.gather(x, i)\n outputs = nn.fused_batch_norm(\n x1,\n scale,\n offset,\n mean=mean,\n variance=variance,\n epsilon=0.01,\n data_format=data_format,\n is_training=is_training)\n outputs = list(outputs)\n # We only test the first value of outputs when is_training is\n # False. 
It looks like CPU and GPU have different outputs for\n # batch_mean and batch_variance for this case.\n if not is_training:\n outputs[1] = constant_op.constant(0.)\n outputs[2] = constant_op.constant(0.)\n loss = nn.l2_loss(outputs[0])\n if is_training:\n gradients = g.gradient(loss, [x1, scale, offset])\n else:\n gradients = [constant_op.constant(0.)] * 3\n return outputs + gradients\n\n # pylint: enable=cell-var-from-loop\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)\n\n def test_log_softmax(self):\n logits = random_ops.random_uniform([3, 2, 4])\n\n def loop_fn(i):\n logits_i = array_ops.gather(logits, i)\n return (nn.log_softmax(logits_i),\n nn.log_softmax(logits_i, axis=0),\n nn.log_softmax(logits_i, axis=-1))\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)\n\n def test_softmax(self):\n logits = random_ops.random_uniform([3, 2, 4])\n\n def loop_fn(i):\n logits_i = array_ops.gather(logits, i)\n return (nn.softmax(logits_i),\n nn.softmax(logits_i, axis=0),\n nn.softmax(logits_i, axis=-1))\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)\n\n def test_softmax_cross_entropy_with_logits(self):\n with backprop.GradientTape(persistent=True) as g:\n logits = random_ops.random_uniform([3, 2, 4])\n g.watch(logits)\n labels = random_ops.random_uniform([3, 2, 4])\n labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)\n\n def loop_fn(i):\n with g:\n logits_i = array_ops.gather(logits, i)\n labels_i = array_ops.gather(labels, i)\n loss = nn.softmax_cross_entropy_with_logits(\n labels=labels_i, logits=logits_i)\n total_loss = math_ops.reduce_sum(loss)\n return loss, g.gradient(total_loss, logits_i)\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)\n\n\nclass RandomTest(PForTestCase):\n\n # The random values generated in the two implementations are not guaranteed to\n # match. 
So we only check the returned shapes.\n def run_and_assert_equal(self, targets1, targets2):\n outputs = self._run_targets(targets1, targets2)\n n = len(outputs) // 2\n for i in range(n):\n self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)\n\n def test_random_uniform(self):\n\n def loop_fn(_):\n return random_ops.random_uniform([3])\n\n self._test_loop_fn(loop_fn, 5)\n\n def test_random_uniform_int(self):\n\n def loop_fn(_):\n return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)\n\n self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)\n\n def test_random_standard_normal(self):\n\n def loop_fn(_):\n return random_ops.random_normal([3])\n\n self._test_loop_fn(loop_fn, 5)\n\n def test_truncated_normal(self):\n\n def loop_fn(_):\n return random_ops.truncated_normal([3])\n\n self._test_loop_fn(loop_fn, 5)\n\n def test_random_gamma_invariant_alpha(self):\n\n def loop_fn(_):\n return random_ops.random_gamma([3], alpha=[0.5])\n\n self._test_loop_fn(loop_fn, 5)\n\n def test_random_gamma_varying_alpha(self):\n alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))\n\n def loop_fn(i):\n alphas_i = array_ops.gather(alphas, i)\n # Test both scalar and non-scalar params and shapes.\n return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),\n random_ops.random_gamma(alpha=alphas_i, shape=[]),\n random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),\n random_ops.random_gamma(alpha=alphas_i, shape=[3]))\n\n self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)\n\n def test_random_poisson_v2_invariant_rate(self):\n\n def loop_fn(_):\n return random_ops.random_poisson(lam=[1.3], shape=[3])\n\n self._test_loop_fn(loop_fn, 5)\n\n def test_random_poisson_v2_varying_rate(self):\n rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))\n\n def loop_fn(i):\n rates_i = array_ops.gather(rates, i)\n # Test both scalar and non-scalar params and shapes.\n return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),\n random_ops.random_poisson(lam=rates_i, shape=[]),\n random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),\n random_ops.random_poisson(lam=rates_i, shape=[3]))\n\n self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)\n\n def test_random_multinomial_invariant_logits(self):\n\n def loop_fn(_):\n return random_ops.categorical(logits=[[1., -1.]], num_samples=3)\n\n self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])\n\n def test_random_multinomial_varying_logits(self):\n logits = random_ops.random_normal([5, 3, 2])\n\n def loop_fn(i):\n logits_i = array_ops.gather(logits, i)\n return random_ops.categorical(logits_i, num_samples=3)\n\n self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])\n\n\nclass LoggingTest(PForTestCase):\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_print(self):\n x = random_ops.random_uniform([3, 5])\n\n def loop_fn(i):\n x1 = array_ops.gather(x, i)\n return logging_ops.Print(\n x1, [x1, \"x1\", array_ops.shape(x1)], summarize=10)\n\n self._test_loop_fn(loop_fn, 3)\n\n def test_assert(self):\n\n def loop_fn(i):\n return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])\n\n # TODO(agarwal): make this work with for_loop.\n with session.Session() as sess:\n sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))\n\n\nclass TensorArrayTest(PForTestCase):\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_outside_and_read(self):\n\n ta = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)\n\n def loop_fn(i):\n return ta.read(i), 
ta.read(0)\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_outside_and_gather(self):\n\n ta = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)\n\n def loop_fn(i):\n return ta.gather([i]), ta.gather([0, 1])\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_outside_and_write_and_scatter(self):\n\n t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)\n handle = t.handle\n\n def loop_fn(i):\n ta = t.write(i + 2, 2 * i).write(i, 5)\n ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])\n return ta.flow\n\n t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)\n out1 = tensor_array_ops.TensorArray(\n dtypes.int32, handle=handle, flow=t1[-1]).stack()\n output1 = self._run_targets(out1)\n\n t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)\n out2 = tensor_array_ops.TensorArray(\n dtypes.int32, handle=handle, flow=t2[-1]).stack()\n output2 = self._run_targets(out2)\n self.assertAllClose(output2, output1)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_inside_and_write(self):\n\n def loop_fn(i):\n # TODO(agarwal): switching the order of writes to ta1 does not work.\n ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(\n 1, 1)\n ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)\n return ta1.stack(), ta2.stack()\n\n self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_inside_and_scatter(self):\n\n def loop_fn(i):\n # TODO(agarwal): switching the order of scatter to ta1 does not work.\n ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(\n [0], [[i, 2]]).scatter([1], [[1, 2]])\n ta2 = tensor_array_ops.TensorArray(dtypes.int32,\n 2).scatter([0], [3]).scatter([1], [4])\n return ta1.stack(), ta2.stack()\n\n self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_inside_and_read(self):\n\n def loop_fn(i):\n ta1 = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)\n ta2 = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)\n # TODO(agarwal): ta1.read(i) currently is not supported.\n return ta1.read(0), ta2.read(0), ta2.read(i)\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_create_inside_and_gather(self):\n\n def loop_fn(i):\n ta1 = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)\n ta2 = tensor_array_ops.TensorArray(\n dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)\n # TODO(agarwal): ta1.read(i) currently is not supported.\n return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_grad(self):\n x = random_ops.random_uniform([3, 2])\n ta = tensor_array_ops.TensorArray(\n dtypes.float32, 3, clear_after_read=False).unstack(x)\n y = math_ops.square(ta.stack())\n\n def loop_fn(i):\n y_i = array_ops.gather(y, i)\n grad = gradient_ops.gradients(y_i, x)[0]\n return array_ops.gather(grad, i)\n\n t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)\n # y = x * x. 
Hence dy/dx = 2 * x.\n actual_grad = 2.0 * x\n with session.Session() as sess:\n actual_grad, computed_grad = sess.run([t1, actual_grad])\n self.assertAllClose(actual_grad, computed_grad)\n\n\nclass StackTest(PForTestCase):\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_stack_inside_loop_invariant(self):\n\n def loop_fn(_):\n s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)\n op1 = data_flow_ops.stack_push_v2(s, 1)\n with ops.control_dependencies([op1]):\n op2 = data_flow_ops.stack_push_v2(s, 2)\n with ops.control_dependencies([op2]):\n e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n with ops.control_dependencies([e2]):\n e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n return e1, e2\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_stack_inside_push_loop_dependent(self):\n\n def loop_fn(i):\n s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)\n op1 = data_flow_ops.stack_push_v2(s, i)\n with ops.control_dependencies([op1]):\n op2 = data_flow_ops.stack_push_v2(s, 2)\n with ops.control_dependencies([op2]):\n e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n with ops.control_dependencies([e2]):\n e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n return e1, e2\n\n self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_stack_outside_pop(self):\n s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)\n op = data_flow_ops.stack_push_v2(s, 5)\n with ops.control_dependencies([op]):\n op = data_flow_ops.stack_push_v2(s, 6)\n with ops.control_dependencies([op]):\n op = data_flow_ops.stack_push_v2(s, 7)\n\n def loop_fn(_):\n e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n with ops.control_dependencies([e1]):\n e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n return e1, e2\n\n with ops.control_dependencies([op]):\n e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)\n with ops.control_dependencies([e1, e2]):\n e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)\n v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)\n self.assertAllEqual([7, 7], v1)\n self.assertAllEqual([6, 6], v2)\n self.assertAllEqual(5, v3)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_stack_outside_push(self):\n s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)\n\n def loop_fn(_):\n return data_flow_ops.stack_push_v2(s, 7)\n\n with self.assertRaisesRegexp(ValueError, \"StackPushV2 not allowed.*\"):\n pfor_control_flow_ops.pfor(loop_fn, iters=2)\n\n\n# TODO(agarwal): test nested while_loops. 
This currently requires converting a\n# tf.cond.\nclass ControlFlowTest(PForTestCase):\n\n def test_while_outside_loop(self):\n\n x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])\n\n def loop_fn(i):\n return x + i\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_invariant_while(self):\n\n def loop_fn(_):\n return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_invariant_while_with_control_dependency(self):\n\n def loop_fn(i):\n with ops.control_dependencies([i]):\n return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,\n [0])\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_while_with_stateful_ops(self):\n\n def loop_fn(_):\n return control_flow_ops.while_loop(\n lambda j, x: j < 4,\n lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_while_unstacked_condition(self):\n\n def loop_fn(i):\n return control_flow_ops.while_loop(lambda j, x: j < 4,\n lambda j, x: (j + 1, x + i), [0, 0])\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_while(self):\n x = random_ops.random_uniform([3, 5])\n lengths = constant_op.constant([4, 0, 2])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n lengths_i = array_ops.gather(lengths, i)\n\n _, total = control_flow_ops.while_loop(\n lambda j, _: j < lengths_i,\n lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])\n return total\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_while_jacobian(self):\n x = random_ops.random_uniform([1, 3])\n y = random_ops.random_uniform([3, 3])\n\n # out = x @ y @ y @ y @ y, where @ is matmul operator.\n _, out = control_flow_ops.while_loop(\n lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),\n [0, x])\n\n def loop_fn(i):\n out_i = array_ops.gather(out, i, axis=1)\n return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])\n\n out = pfor_control_flow_ops.pfor(loop_fn, iters=3)\n\n # The above code does not work with tf.while_loop instead of pfor. 
So we\n # manually compute the expected output here.\n # Note that gradient of output w.r.t is (y @ y @ y @ y)^T.\n expected_output = y\n for _ in range(3):\n expected_output = math_ops.matmul(expected_output, y)\n expected_output = array_ops.transpose(expected_output, [1, 0])\n\n with session.Session() as sess:\n out, expected = sess.run([out, expected_output])\n self.assertAllClose(expected, out)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_tensor_array_as_loop_variable(self):\n\n def loop_fn(i):\n\n def body(j, ta):\n ta = ta.write(j, i + j * j)\n return j + 1, ta\n\n _, ta = control_flow_ops.while_loop(\n lambda j, _: j < 4, body,\n (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))\n return ta.stack()\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_read_tensor_array_partitioned_indices(self):\n # Note that tensor array values are pfor loop dependent, and the while loop\n # termination condition is also dependent on pfor iteration.\n def loop_fn(i):\n ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)\n ta = ta.unstack(i + list(range(5)))\n\n def body(j, s):\n return j + 1, s + ta.read(j)\n\n _, s = control_flow_ops.while_loop(lambda j, _: j < i,\n body,\n (0, 0))\n return s\n\n self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_external_while_loop_grad(self):\n # Here we test that external while_loops that are extended from inside pfor\n # (due to gradient calls) are not actually converted. If the below was\n # converted all pfor iterations would write to the same tensor array\n # indices.\n x = constant_op.constant(1.)\n\n def body(j, ta):\n ta = ta.write(j, x)\n return j + 1, ta\n\n _, ta = control_flow_ops.while_loop(\n lambda j, _: j < 4, body,\n (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))\n out = ta.stack()\n\n def loop_fn(i):\n out_i = array_ops.gather(out, i)\n return gradient_ops.gradients(out_i, x)[0]\n\n with session.Session() as sess:\n # out is [x, x, x]. Hence the gradients should be [1, 1, 1].\n self.assertAllEqual([1, 1, 1],\n sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_tensor_array_grad(self):\n inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)\n ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)\n ta = ta.unstack(inp)\n\n def loop_fn(i):\n\n def body(j, x):\n value = ta.gather([j])\n value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)\n return j + 1, x + value\n\n _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,\n (0, array_ops.zeros([2])))\n out = math_ops.reduce_prod(out)\n return out, gradient_ops.gradients(out, inp)[0]\n\n pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)\n # Note that tf.while_loop does not work in the setup above. So we manually\n # construct the equivalent computation of the above loops here.\n real_out = math_ops.reduce_sum(inp, axis=[0])\n real_out = math_ops.reduce_prod(real_out, axis=[1])\n # Note that gradients of real_out will accumulate the gradients across the\n # output value. 
Hence we do the same aggregation on pfor_out_grad.\n real_out_grad = gradient_ops.gradients(real_out, inp)[0]\n sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])\n\n with session.Session() as sess:\n v1, v2, v1_grad, v2_grad = sess.run(\n [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])\n self.assertAllClose(v1, v2)\n self.assertAllClose(v1_grad, v2_grad)\n\n\ndef dynamic_lstm_input_fn(batch_size, state_size, max_steps):\n # We make inputs and sequence_length constant so that multiple session.run\n # calls produce the same result.\n inputs = constant_op.constant(\n np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)\n sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)\n sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)\n return inputs, sequence_length\n\n\ndef create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):\n cell = cell_fn(state_size)\n inputs, sequence_length = dynamic_lstm_input_fn(batch_size,\n state_size,\n max_steps)\n inputs_ta = tensor_array_ops.TensorArray(\n dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])\n inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])\n inputs_ta = inputs_ta.unstack(inputs_time_major)\n zeros = array_ops.zeros([state_size])\n\n def loop_fn(i):\n sequence_length_i = array_ops.gather(sequence_length, i)\n\n def body_fn(t, state, ta):\n inputs_t = array_ops.expand_dims(\n array_ops.gather(inputs_ta.read(t), i), 0)\n output, new_state = cell(inputs_t, state)\n output = array_ops.reshape(output, [-1])\n # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the\n # array_ops.where when t < min(sequence_length). Doing that requires\n # supporting tf.cond pfor conversion.\n done = t >= sequence_length_i\n output = array_ops.where(done, zeros, output)\n ta = ta.write(t, output)\n new_state = [array_ops.where(done, s, ns) for s, ns in\n zip(nest.flatten(state), nest.flatten(new_state))]\n new_state = nest.pack_sequence_as(state, new_state)\n return t + 1, new_state, ta\n\n def condition_fn(t, _, unused):\n del unused\n return t < max_steps\n\n initial_state = cell.zero_state(1, dtypes.float32)\n _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [\n 0, initial_state,\n tensor_array_ops.TensorArray(dtypes.float32, max_steps)\n ])\n\n new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]\n new_state = nest.pack_sequence_as(initial_state, new_state)\n return ta.stack(), new_state\n\n pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)\n tf_output = rnn.dynamic_rnn(\n cell,\n inputs,\n sequence_length=sequence_length,\n initial_state=cell.zero_state(batch_size, dtypes.float32))\n return pfor_output, tf_output\n\n\nclass RNNTest(PForTestCase):\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_dynamic_rnn(self):\n pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,\n 3, 5, 7)\n self.run_and_assert_equal(pfor_outputs, tf_outputs)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_dynamic_lstm(self):\n pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell,\n 3, 5, 7)\n self.run_and_assert_equal(pfor_outputs, tf_outputs)\n\n\n# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop\n# conversion don't look good. Some of it seems like lot of copies between host\n# and device. 
Optimize that.\nclass Benchmarks(test.Benchmark):\n\n def _run(self, targets, iters, name=None):\n\n def _done(t):\n # Note that we don't use tf.control_dependencies since that will not make\n # sure that the computation on GPU has actually finished. So we fetch the\n # first element of the output, and assume that this will not be called on\n # empty tensors.\n return array_ops.gather(array_ops.reshape(t, [-1]), 0)\n\n targets = [_done(x) for x in nest.flatten(targets)]\n sess = session.Session()\n with sess:\n init = variables.global_variables_initializer()\n sess.run(init)\n run_fn = sess.make_callable(targets)\n run_fn() # Warm up\n begin = time.time()\n for _ in range(iters):\n run_fn()\n end = time.time()\n avg_time_ms = 1000 * (end - begin) / iters\n self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)\n return avg_time_ms\n\n def benchmark_sess_run_overhead(self):\n with ops.Graph().as_default():\n x = constant_op.constant(1.0)\n self._run(x, 10000, name=\"session_run_overhead\")\n\n def benchmark_add(self):\n with ops.Graph().as_default():\n n = 256\n params = 1000\n x = random_ops.random_normal([n, params])\n y = random_ops.random_normal([n, params])\n\n def loop_fn(i):\n x_i = array_ops.gather(x, i)\n y_i = array_ops.gather(y, i)\n return x_i + y_i\n\n pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)\n while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)\n manual = x + y\n\n self._run(manual, 1000, name=\"manual_add\")\n self._run(pfor_outputs, 1000, name=\"pfor_add\")\n self._run(while_outputs, 100, name=\"while_add\")\n\n def benchmark_matmul(self):\n with ops.Graph().as_default():\n n = 1024\n params = 1000\n x = random_ops.random_normal([n, params])\n y = random_ops.random_normal([params, params])\n\n def loop_fn(i):\n x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)\n return math_ops.matmul(x_i, y)\n\n pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)\n while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)\n manual = math_ops.matmul(x, y)\n\n self._run(manual, 1000, name=\"manual_matmul\")\n self._run(pfor_outputs, 1000, name=\"pfor_matmul\")\n self._run(while_outputs, 100, name=\"while_matmul\")\n\n def benchmark_map_fn(self):\n with ops.Graph().as_default():\n b = 256\n params = 1000\n inp = random_ops.random_normal((b, params))\n fn = lambda x: x * x\n\n def pfor_map_fn(f, x):\n return pfor_control_flow_ops.pfor(\n lambda i: f(array_ops.gather(x, i)),\n array_ops.shape(x)[0])\n\n map_output = map_fn.map_fn(fn, inp)\n pfor_output = pfor_map_fn(fn, inp)\n\n self._run(map_output, 100, name=\"tf_map_fn\")\n self._run(pfor_output, 100, name=\"pfor_map_fn\")\n\n def benchmark_basic_while(self):\n with ops.Graph().as_default():\n\n def loop_fn(i):\n _, s = control_flow_ops.while_loop(\n lambda t, x: t < i,\n lambda t, x: (t + 1, x + i),\n [0, 0])\n return s\n\n iters = 50\n pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)\n for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,\n iters)\n self._run(pfor_output, 100, name=\"pfor_basic\")\n self._run(for_loop_output, 100, name=\"for_loop_basic\")\n\n def benchmark_dynamic_rnn(self):\n with ops.Graph().as_default():\n pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,\n 128, 512, 16)\n self._run(pfor_outputs, 100, name=\"pfor_rnn\")\n self._run(tf_outputs, 100, name=\"tf_rnn\")\n\n def benchmark_reduction(self):\n n = 1024\n with ops.Graph().as_default():\n x = random_ops.random_uniform([n, n])\n w = 
random_ops.random_uniform([n, n])\n\n def loop_fn(i, pfor_config):\n x_i = array_ops.gather(x, i)\n return math_ops.reduce_sum(\n math_ops.matmul(pfor_config.reduce_concat(x_i), w))\n\n # Note that output_reduction will be tiled, so there may be some minor\n # overheads compared to output_no_reduction.\n output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)\n output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))\n # Benchmark to test that reduction does not add overhead and its output is\n # treated as loop invariant.\n self._run(output_reduction, 30, name=\"matmul_reduction\")\n self._run(output_no_reduction, 30, name=\"matmul_no_reduction\")\n\n\nclass SparseTest(PForTestCase):\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_var_loop_len(self):\n num_iters = array_ops.placeholder(dtypes.int32)\n\n def loop_fn(_):\n return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],\n [3]) # [0, 2, 0]\n\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n with self.cached_session() as sess:\n sess.run(pfor, feed_dict={num_iters: 3})\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_none_stacked(self):\n num_iters = 10\n\n def loop_fn(_):\n return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],\n [3]) # [0, 2, 0]\n\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n\n indices = [[i, j] for i in range(num_iters) for j in range(3)]\n values = [4, 5, 6] * num_iters\n dense_shapes = [num_iters, 3]\n # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]\n manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)\n self.run_and_assert_equal(pfor, manual)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_all_stacked(self):\n num_iters = 10\n\n def loop_fn(i):\n i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)\n indices = array_ops.expand_dims(i, 0)\n return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]\n\n # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],\n list(range(num_iters)),\n (num_iters, num_iters))\n self.run_and_assert_equal(pfor, manual)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_indices_stacked(self):\n num_iters = 10\n\n def loop_fn(i):\n i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)\n indices = array_ops.expand_dims(i, 0)\n return sparse_tensor.SparseTensor(indices, [1], [num_iters])\n\n # Expected result: identity matrix size num_iters * num_iters\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],\n [1] * num_iters, (num_iters, num_iters))\n self.run_and_assert_equal(pfor, manual)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_values_stacked(self):\n num_iters = 10\n\n def loop_fn(i):\n i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)\n return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]\n\n # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],\n list(range(num_iters)),\n (num_iters, num_iters))\n self.run_and_assert_equal(pfor, manual)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_shapes_stacked(self):\n num_iters = 10\n\n def loop_fn(i):\n i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 
0)\n return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]\n\n # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],\n [1] * num_iters, (num_iters, num_iters))\n self.run_and_assert_equal(pfor, manual)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_sparse_result_shapes_stacked_2D(self):\n num_iters = 10\n\n def loop_fn(i):\n i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)\n shape = array_ops.concat([i, i], 0)\n return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]\n\n # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]\n pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)\n manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],\n [1] * num_iters,\n (num_iters, num_iters, num_iters))\n self.run_and_assert_equal(pfor, manual)\n\n\nclass ParsingTest(PForTestCase):\n\n def test_decode_csv(self):\n csv_tensor = constant_op.constant([[\"1:2:3\"], [\"::\"], [\"7:8:9\"]])\n kwargs = {\"record_defaults\": [[10], [20], [30]], \"field_delim\": \":\"}\n\n def loop_fn(i):\n line = array_ops.gather(csv_tensor, i)\n return parsing_ops.decode_csv(line, **kwargs)\n\n self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)\n\n @test_util.run_v1_only(\"b/122612051\")\n def test_parse_single_example(self):\n\n def _int64_feature(*values):\n return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))\n\n def _bytes_feature(*values):\n return feature_pb2.Feature(\n bytes_list=feature_pb2.BytesList(\n value=[v.encode(\"utf-8\") for v in values]))\n\n examples = constant_op.constant([\n example_pb2.Example(\n features=feature_pb2.Features(\n feature={\n \"dense_int\": _int64_feature(i),\n \"dense_str\": _bytes_feature(str(i)),\n \"sparse_int\": _int64_feature(i, i * 2, i * 4, i * 8),\n \"sparse_str\": _bytes_feature(*[\"abc\"] * i)\n })).SerializeToString() for i in range(10)\n ])\n\n features = {\n \"dense_int\": parsing_ops.FixedLenFeature((), dtypes.int64, 0),\n \"dense_str\": parsing_ops.FixedLenFeature((), dtypes.string, \"\"),\n \"sparse_int\": parsing_ops.VarLenFeature(dtypes.int64),\n \"sparse_str\": parsing_ops.VarLenFeature(dtypes.string),\n }\n\n def loop_fn(i):\n example_proto = array_ops.gather(examples, i)\n f = parsing_ops.parse_single_example(example_proto, features)\n return f\n\n pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)\n manual = parsing_ops.parse_example(examples, features)\n self.run_and_assert_equal(pfor, manual)\n\n\nclass PartitionedCallTest(PForTestCase):\n\n def test_simple(self):\n\n @def_function.function\n def f(x):\n return math_ops.square(x) + 1\n\n z = random_ops.random_uniform([4])\n\n def loop_fn(i):\n return f(array_ops.gather(z, i))\n\n self._test_loop_fn(loop_fn, 4)\n\n def test_nested_calls(self):\n\n @def_function.function\n def inner(x):\n return math_ops.square(x)\n\n @def_function.function\n def outer(y):\n return math_ops.reduce_sum(inner(y)) + 2\n\n z = random_ops.random_uniform([4, 2])\n\n def loop_fn(i):\n return outer(array_ops.gather(z, i))\n\n self._test_loop_fn(loop_fn, 4)\n\n def test_nested_definition(self):\n\n @def_function.function\n def outer(y):\n @def_function.function\n def inner(x):\n return math_ops.square(x) + 1\n\n return math_ops.reduce_sum(inner(y)) + 2\n\n z = random_ops.random_uniform([4, 2])\n\n def loop_fn(i):\n return outer(array_ops.gather(z, i))\n\n 
self._test_loop_fn(loop_fn, 4)\n\n def test_gradients(self):\n\n @def_function.function\n def f(x):\n return math_ops.square(x) + 1\n\n z = random_ops.random_uniform([4, 2])\n\n def loop_fn(i):\n z_i = array_ops.gather(z, i)\n with backprop.GradientTape() as g:\n g.watch(z_i)\n out = f(z_i)\n return out, g.gradient(out, z_i)\n\n self._test_loop_fn(loop_fn, 4, [dtypes.float32] * 2)\n\n def test_stateful_with_gradients(self):\n\n z = random_ops.random_uniform([4, 2])\n v = variables.Variable(z[0])\n\n @def_function.function\n def f(x):\n return math_ops.square(x) + v + 1\n\n def loop_fn(i):\n z_i = array_ops.gather(z, i)\n with backprop.GradientTape() as g:\n g.watch(z_i)\n out = f(z_i)\n return out, g.gradient(out, z_i)\n\n self._test_loop_fn(loop_fn, 4, [dtypes.float32] * 2)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.data_flow_ops.stack_push_v2",
"tensorflow.python.ops.nn.softmax",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.parsing_ops.parse_single_example",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.nn.conv2d",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.ops.control_flow_ops.Assert",
"numpy.random.randint",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.random_ops.random_poisson",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.parsing_ops.decode_csv",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.data_flow_ops.stack_pop_v2",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.nn.conv2d_backprop_filter",
"tensorflow.python.ops.random_ops.categorical",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.parsing_ops.FixedLenFeature",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.nn.log_softmax",
"tensorflow.python.ops.parallel_for.control_flow_ops.pfor",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.random_ops.random_gamma",
"tensorflow.python.ops.nn.conv2d_backprop_input",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.nn.top_k",
"tensorflow.python.ops.parallel_for.control_flow_ops.for_loop",
"numpy.random.rand",
"tensorflow.core.example.feature_pb2.Int64List",
"tensorflow.python.compat.compat.forward_compatibility_horizon",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.nn.l2_loss",
"tensorflow.python.ops.nn.avg_pool",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.data_flow_ops.stack_v2",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.ops.parsing_ops.VarLenFeature",
"tensorflow.python.ops.nn.max_pool",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.map_fn.map_fn",
"tensorflow.python.ops.nn.softmax_cross_entropy_with_logits",
"tensorflow.python.ops.nn.fused_batch_norm",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.ops.parsing_ops.parse_example",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.nn.max_pool3d",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"2.7",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.6",
"1.2",
"2.10"
]
}
] |
w-sugar/slowfast | [
"7d5d759ddfd759298e10e7f49ba343c9d4229437"
] | [
"slowfast/datasets/utils.py"
] | [
"#!/usr/bin/env python3\n\nimport logging\nimport numpy as np\nimport os\nimport random\nimport time\nfrom collections import defaultdict\nimport cv2\nimport torch\nfrom iopath.common.file_io import g_pathmgr\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom . import transform as transform\n\nlogger = logging.getLogger(__name__)\n\n\ndef retry_load_images(image_paths, retry=10, backend=\"pytorch\"):\n \"\"\"\n This function is to load images with support of retrying for failed load.\n\n Args:\n image_paths (list): paths of images needed to be loaded.\n retry (int, optional): maximum time of loading retrying. Defaults to 10.\n backend (str): `pytorch` or `cv2`.\n\n Returns:\n imgs (list): list of loaded images.\n \"\"\"\n for i in range(retry):\n imgs = []\n for image_path in image_paths:\n with g_pathmgr.open(image_path, \"rb\") as f:\n img_str = np.frombuffer(f.read(), np.uint8)\n img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)\n imgs.append(img)\n\n if all(img is not None for img in imgs):\n if backend == \"pytorch\":\n imgs = torch.as_tensor(np.stack(imgs))\n return imgs\n else:\n logger.warn(\"Reading failed. Will retry.\")\n time.sleep(1.0)\n if i == retry - 1:\n raise Exception(\"Failed to load images {}\".format(image_paths))\n\n\ndef get_sequence(center_idx, half_len, sample_rate, num_frames):\n \"\"\"\n Sample frames among the corresponding clip.\n\n Args:\n center_idx (int): center frame idx for current clip\n half_len (int): half of the clip length\n sample_rate (int): sampling rate for sampling frames inside of the clip\n num_frames (int): number of expected sampled frames\n\n Returns:\n seq (list): list of indexes of sampled frames in this clip.\n \"\"\"\n seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate))\n\n for seq_idx in range(len(seq)):\n if seq[seq_idx] < 0:\n seq[seq_idx] = 0\n elif seq[seq_idx] >= num_frames:\n seq[seq_idx] = num_frames - 1\n return seq\n\n\ndef pack_pathway_output(cfg, frames):\n \"\"\"\n Prepare output as a list of tensors. Each tensor corresponding to a\n unique pathway.\n Args:\n frames (tensor): frames of images sampled from the video. The\n dimension is `channel` x `num frames` x `height` x `width`.\n Returns:\n frame_list (list): list of tensors with the dimension of\n `channel` x `num frames` x `height` x `width`.\n \"\"\"\n if cfg.DATA.REVERSE_INPUT_CHANNEL:\n frames = frames[[2, 1, 0], :, :, :]\n if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:\n frame_list = [frames]\n elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:\n fast_pathway = frames\n # Perform temporal sampling from the fast pathway.\n slow_pathway = torch.index_select(\n frames,\n 1,\n torch.linspace(\n 0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA\n ).long(),\n )\n frame_list = [slow_pathway, fast_pathway]\n else:\n raise NotImplementedError(\n \"Model arch {} is not in {}\".format(\n cfg.MODEL.ARCH,\n cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,\n )\n )\n return frame_list\n\n\ndef spatial_sampling(\n frames,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=224,\n random_horizontal_flip=True,\n inverse_uniform_sampling=False,\n aspect_ratio=None,\n scale=None,\n motion_shift=False,\n):\n \"\"\"\n Perform spatial sampling on the given video frames. If spatial_idx is\n -1, perform random scale, random crop, and random flip on the given\n frames. 
If spatial_idx is 0, 1, or 2, perform spatial uniform sampling\n with the given spatial_idx.\n Args:\n frames (tensor): frames of images sampled from the video. The\n dimension is `num frames` x `height` x `width` x `channel`.\n spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,\n or 2, perform left, center, right crop if width is larger than\n height, and perform top, center, bottom crop if height is larger\n than width.\n min_scale (int): the minimal size of scaling.\n max_scale (int): the maximal size of scaling.\n crop_size (int): the size of height and width used to crop the\n frames.\n inverse_uniform_sampling (bool): if True, sample uniformly in\n [1 / max_scale, 1 / min_scale] and take a reciprocal to get the\n scale. If False, take a uniform sample from [min_scale,\n max_scale].\n aspect_ratio (list): Aspect ratio range for resizing.\n scale (list): Scale range for resizing.\n motion_shift (bool): Whether to apply motion shift for resizing.\n Returns:\n frames (tensor): spatially sampled frames.\n \"\"\"\n assert spatial_idx in [-1, 0, 1, 2]\n if spatial_idx == -1:\n if aspect_ratio is None and scale is None:\n frames, _ = transform.random_short_side_scale_jitter(\n images=frames,\n min_size=min_scale,\n max_size=max_scale,\n inverse_uniform_sampling=inverse_uniform_sampling,\n )\n frames, _ = transform.random_crop(frames, crop_size)\n else:\n transform_func = (\n transform.random_resized_crop_with_shift\n if motion_shift\n else transform.random_resized_crop\n )\n frames = transform_func(\n images=frames,\n target_height=crop_size,\n target_width=crop_size,\n scale=scale,\n ratio=aspect_ratio,\n )\n if random_horizontal_flip:\n frames, _ = transform.horizontal_flip(0.5, frames)\n else:\n # The testing is deterministic and no jitter should be performed.\n # min_scale, max_scale, and crop_size are expected to be the same.\n assert len({min_scale, max_scale}) == 1\n frames, _ = transform.random_short_side_scale_jitter(\n frames, min_scale, max_scale\n )\n frames, _ = transform.uniform_crop(frames, crop_size, spatial_idx)\n return frames\n\ndef spatial_sampling_bbox(\n frames,\n boxes,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=224,\n random_horizontal_flip=True,\n inverse_uniform_sampling=False,\n aspect_ratio=None,\n scale=None,\n motion_shift=False,\n):\n \"\"\"\n Perform spatial sampling on the given video frames. If spatial_idx is\n -1, perform random scale, random crop, and random flip on the given\n frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling\n with the given spatial_idx.\n Args:\n frames (tensor): frames of images sampled from the video. The\n dimension is `num frames` x `height` x `width` x `channel`.\n spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,\n or 2, perform left, center, right crop if width is larger than\n height, and perform top, center, bottom crop if height is larger\n than width.\n min_scale (int): the minimal size of scaling.\n max_scale (int): the maximal size of scaling.\n crop_size (int): the size of height and width used to crop the\n frames.\n inverse_uniform_sampling (bool): if True, sample uniformly in\n [1 / max_scale, 1 / min_scale] and take a reciprocal to get the\n scale. 
If False, take a uniform sample from [min_scale,\n max_scale].\n aspect_ratio (list): Aspect ratio range for resizing.\n scale (list): Scale range for resizing.\n motion_shift (bool): Whether to apply motion shift for resizing.\n Returns:\n frames (tensor): spatially sampled frames.\n \"\"\"\n assert spatial_idx in [-1, 0, 1, 2]\n if spatial_idx == -1:\n if aspect_ratio is None and scale is None:\n frames, boxes = transform.random_short_side_scale_jitter(\n images=frames,\n boxes=boxes,\n min_size=min_scale,\n max_size=max_scale,\n inverse_uniform_sampling=inverse_uniform_sampling,\n )\n frames, boxes = transform.random_crop(frames, crop_size, boxes=boxes)\n else:\n transform_func = (\n transform.random_resized_crop_with_shift\n if motion_shift\n else transform.random_resized_crop\n )\n frames = transform_func(\n images=frames,\n target_height=crop_size,\n target_width=crop_size,\n scale=scale,\n ratio=aspect_ratio,\n )\n if random_horizontal_flip:\n frames, boxes = transform.horizontal_flip(0.5, frames, boxes=boxes)\n else:\n # The testing is deterministic and no jitter should be performed.\n # min_scale, max_scale, and crop_size are expected to be the same.\n assert len({min_scale, max_scale}) == 1\n frames, boxes = transform.random_short_side_scale_jitter(\n frames, min_scale, max_scale, boxes=boxes\n )\n frames, boxes = transform.uniform_crop(frames, crop_size, spatial_idx, boxes=boxes)\n return frames, boxes\n\ndef as_binary_vector(labels, num_classes):\n \"\"\"\n Construct binary label vector given a list of label indices.\n Args:\n labels (list): The input label list.\n num_classes (int): Number of classes of the label vector.\n Returns:\n labels (numpy array): the resulting binary vector.\n \"\"\"\n label_arr = np.zeros((num_classes,))\n\n for lbl in set(labels):\n label_arr[lbl] = 1.0\n return label_arr\n\n\ndef aggregate_labels(label_list):\n \"\"\"\n Join a list of label lists.\n Args:\n labels (list): The input label list.\n Returns:\n labels (list): The joint list of all lists in input.\n \"\"\"\n all_labels = []\n for labels in label_list:\n for l in labels:\n all_labels.append(l)\n return list(set(all_labels))\n\n\ndef convert_to_video_level_labels(labels):\n \"\"\"\n Aggregate annotations from all frames of a video to form video-level labels.\n Args:\n labels (list): The input label list.\n Returns:\n labels (list): Same as input, but with each label replaced by\n a video-level one.\n \"\"\"\n for video_id in range(len(labels)):\n video_level_labels = aggregate_labels(labels[video_id])\n for i in range(len(labels[video_id])):\n labels[video_id][i] = video_level_labels\n return labels\n\n\ndef load_image_lists(frame_list_file, prefix=\"\", return_list=False):\n \"\"\"\n Load image paths and labels from a \"frame list\".\n Each line of the frame list contains:\n `original_vido_id video_id frame_id path labels`\n Args:\n frame_list_file (string): path to the frame list.\n prefix (str): the prefix for the path.\n return_list (bool): if True, return a list. 
If False, return a dict.\n Returns:\n image_paths (list or dict): list of list containing path to each frame.\n If return_list is False, then return in a dict form.\n labels (list or dict): list of list containing label of each frame.\n If return_list is False, then return in a dict form.\n \"\"\"\n image_paths = defaultdict(list)\n labels = defaultdict(list)\n with g_pathmgr.open(frame_list_file, \"r\") as f:\n assert f.readline().startswith(\"original_vido_id\")\n for line in f:\n row = line.split()\n # original_vido_id video_id frame_id path labels\n assert len(row) == 5\n video_name = row[0]\n if prefix == \"\":\n path = row[3]\n else:\n path = os.path.join(prefix, row[3])\n image_paths[video_name].append(path)\n frame_labels = row[-1].replace('\"', \"\")\n if frame_labels != \"\":\n labels[video_name].append(\n [int(x) for x in frame_labels.split(\",\")]\n )\n else:\n labels[video_name].append([])\n\n if return_list:\n keys = image_paths.keys()\n image_paths = [image_paths[key] for key in keys]\n labels = [labels[key] for key in keys]\n return image_paths, labels\n return dict(image_paths), dict(labels)\n\n\ndef tensor_normalize(tensor, mean, std):\n \"\"\"\n Normalize a given tensor by subtracting the mean and dividing the std.\n Args:\n tensor (tensor): tensor to normalize.\n mean (tensor or list): mean value to subtract.\n std (tensor or list): std to divide.\n \"\"\"\n if tensor.dtype == torch.uint8:\n tensor = tensor.float()\n tensor = tensor / 255.0\n if type(mean) == list:\n mean = torch.tensor(mean)\n if type(std) == list:\n std = torch.tensor(std)\n tensor = tensor - mean\n tensor = tensor / std\n return tensor\n\n\ndef get_random_sampling_rate(long_cycle_sampling_rate, sampling_rate):\n \"\"\"\n When multigrid training uses a fewer number of frames, we randomly\n increase the sampling rate so that some clips cover the original span.\n \"\"\"\n if long_cycle_sampling_rate > 0:\n assert long_cycle_sampling_rate >= sampling_rate\n return random.randint(sampling_rate, long_cycle_sampling_rate)\n else:\n return sampling_rate\n\n\ndef revert_tensor_normalize(tensor, mean, std):\n \"\"\"\n Revert normalization for a given tensor by multiplying by the std and adding the mean.\n Args:\n tensor (tensor): tensor to revert normalization.\n mean (tensor or list): mean value to add.\n std (tensor or list): std to multiply.\n \"\"\"\n if type(mean) == list:\n mean = torch.tensor(mean)\n if type(std) == list:\n std = torch.tensor(std)\n tensor = tensor * std\n tensor = tensor + mean\n return tensor\n\n\ndef create_sampler(dataset, shuffle, cfg):\n \"\"\"\n Create sampler for the given dataset.\n Args:\n dataset (torch.utils.data.Dataset): the given dataset.\n shuffle (bool): set to ``True`` to have the data reshuffled\n at every epoch.\n cfg (CfgNode): configs. Details can be found in\n slowfast/config/defaults.py\n Returns:\n sampler (Sampler): the created sampler.\n \"\"\"\n sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None\n\n return sampler\n\n\ndef loader_worker_init_fn(dataset):\n \"\"\"\n Create init function passed to pytorch data loader.\n Args:\n dataset (torch.utils.data.Dataset): the given dataset.\n \"\"\"\n return None\n"
] | [
[
"torch.linspace",
"torch.utils.data.distributed.DistributedSampler",
"numpy.stack",
"torch.tensor",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cclauss/kornia | [
"bf2f45873f0204fcb0f8bfab51fd4ed1316935c5",
"bf2f45873f0204fcb0f8bfab51fd4ed1316935c5"
] | [
"kornia/geometry/epipolar/numeric.py",
"test/augmentation/test_container.py"
] | [
"\"\"\"Module containing numerical functionalities for SfM\"\"\"\n\nimport torch\n\n# TODO: this should go to `kornia.geometry.linalg`\n\n\ndef cross_product_matrix(x: torch.Tensor) -> torch.Tensor:\n r\"\"\"Returns the cross_product_matrix symmetric matrix of a vector.\n\n Args:\n x: The input vector to construct the matrix in the shape :math:`(B, 3)`.\n\n Returns:\n The constructed cross_product_matrix symmetric matrix with shape :math:`(B, 3, 3)`.\n\n \"\"\"\n if not (len(x.shape) == 2 and x.shape[1] == 3):\n raise AssertionError(x.shape)\n # get vector compononens\n x0 = x[..., 0]\n x1 = x[..., 1]\n x2 = x[..., 2]\n\n # construct the matrix, reshape to 3x3 and return\n zeros = torch.zeros_like(x0)\n cross_product_matrix_flat = torch.stack([zeros, -x2, x1, x2, zeros, -x0, -x1, x0, zeros], dim=-1)\n return cross_product_matrix_flat.view(-1, 3, 3)\n\n\ndef eye_like(n: int, input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Returns a 2-D tensor with ones on the diagonal and zeros elsewhere with same size as the input.\n\n Args:\n n: the number of rows :math:`(N)`.\n input: image tensor that will determine the batch size of the output matrix.\n The expected shape is :math:`(B, *)`.\n\n Returns:\n The identity matrix with same size as input :math:`(*, N, N)`.\n\n \"\"\"\n if n <= 0:\n raise AssertionError(type(n), n)\n if len(input.shape) < 1:\n raise AssertionError(input.shape)\n\n identity = torch.eye(n, device=input.device, dtype=input.dtype)\n return identity[None].repeat(input.shape[0], 1, 1)\n\n\ndef vec_like(n, tensor):\n r\"\"\"Returns a 2-D tensor with a vector containing zeros with same size as the input.\n\n Args:\n n: the number of rows :math:`(N)`.\n input: image tensor that will determine the batch size of the output matrix.\n The expected shape is :math:`(B, *)`.\n\n Returns:\n The vector with same size as input :math:`(*, N, 1)`.\n\n \"\"\"\n if n <= 0:\n raise AssertionError(type(n), n)\n if len(tensor.shape) < 1:\n raise AssertionError(tensor.shape)\n\n vec = torch.zeros(n, 1, device=tensor.device, dtype=tensor.dtype)\n return vec[None].repeat(tensor.shape[0], 1, 1)\n",
"import pytest\nimport torch\n\nimport kornia\nimport kornia.augmentation as K\nfrom kornia.augmentation.base import MixAugmentationBase\nfrom kornia.constants import BorderType\nfrom kornia.geometry.bbox import bbox_to_mask\nfrom kornia.testing import assert_close\n\n\ndef reproducibility_test(input, seq):\n if isinstance(input, (tuple, list)):\n output_1 = seq(*input)\n output_2 = seq(*input, params=seq._params)\n else:\n output_1 = seq(input)\n output_2 = seq(input, params=seq._params)\n\n if isinstance(output_1, (tuple, list)) and isinstance(output_2, (tuple, list)):\n [\n assert_close(o1, o2)\n for o1, o2 in zip(output_1, output_2)\n if isinstance(o1, (torch.Tensor,)) and isinstance(o2, (torch.Tensor,))\n ]\n elif isinstance(output_1, (tuple, list)) and isinstance(output_2, (torch.Tensor,)):\n assert_close(output_1[0], output_2)\n elif isinstance(output_2, (tuple, list)) and isinstance(output_1, (torch.Tensor,)):\n assert_close(output_1, output_2[0])\n elif isinstance(output_2, (torch.Tensor,)) and isinstance(output_1, (torch.Tensor,)):\n assert_close(output_1, output_2, msg=f\"{seq._params}\")\n else:\n assert False, (\"cannot compare\", type(output_1), type(output_2))\n\n\nclass TestVideoSequential:\n @pytest.mark.parametrize('shape', [(3, 4), (2, 3, 4), (2, 3, 5, 6), (2, 3, 4, 5, 6, 7)])\n @pytest.mark.parametrize('data_format', [\"BCTHW\", \"BTCHW\"])\n def test_exception(self, shape, data_format, device, dtype):\n aug_list = K.VideoSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1), data_format=data_format, same_on_frame=True)\n with pytest.raises(AssertionError):\n input = torch.randn(*shape, device=device, dtype=dtype)\n output = aug_list(input)\n\n @pytest.mark.parametrize(\n 'augmentation',\n [\n K.RandomAffine(360, p=1.0),\n K.CenterCrop((3, 3), p=1.0),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),\n K.RandomCrop((5, 5), p=1.0),\n K.RandomErasing(p=1.0),\n K.RandomGrayscale(p=1.0),\n K.RandomHorizontalFlip(p=1.0),\n K.RandomVerticalFlip(p=1.0),\n K.RandomPerspective(p=1.0),\n K.RandomResizedCrop((5, 5), p=1.0),\n K.RandomRotation(360.0, p=1.0),\n K.RandomSolarize(p=1.0),\n K.RandomPosterize(p=1.0),\n K.RandomSharpness(p=1.0),\n K.RandomEqualize(p=1.0),\n K.RandomMotionBlur(3, 35.0, 0.5, p=1.0),\n K.Normalize(torch.tensor([0.5, 0.5, 0.5]), torch.tensor([0.5, 0.5, 0.5]), p=1.0),\n K.Denormalize(torch.tensor([0.5, 0.5, 0.5]), torch.tensor([0.5, 0.5, 0.5]), p=1.0),\n ],\n )\n @pytest.mark.parametrize('data_format', [\"BCTHW\", \"BTCHW\"])\n def test_augmentation(self, augmentation, data_format, device, dtype):\n input = torch.randint(255, (1, 3, 3, 5, 6), device=device, dtype=dtype).repeat(2, 1, 1, 1, 1) / 255.0\n torch.manual_seed(21)\n aug_list = K.VideoSequential(augmentation, data_format=data_format, same_on_frame=True)\n reproducibility_test(input, aug_list)\n\n @pytest.mark.parametrize(\n 'augmentations',\n [\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0), K.RandomAffine(360, p=1.0)],\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0), K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0)],\n [K.RandomAffine(360, p=1.0), kornia.color.BgrToRgb()],\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.0), K.RandomAffine(360, p=0.0)],\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.0)],\n [K.RandomAffine(360, p=0.0)],\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0), K.RandomAffine(360, p=1.0), K.RandomMixUp(p=1.0)],\n ],\n )\n @pytest.mark.parametrize('data_format', [\"BCTHW\", \"BTCHW\"])\n @pytest.mark.parametrize('random_apply', [1, (1, 1), (1,), 10, True, False])\n def test_same_on_frame(self, augmentations, data_format, 
random_apply, device, dtype):\n aug_list = K.VideoSequential(\n *augmentations, data_format=data_format, same_on_frame=True, random_apply=random_apply\n )\n\n if data_format == 'BCTHW':\n input = torch.randn(2, 3, 1, 5, 6, device=device, dtype=dtype).repeat(1, 1, 4, 1, 1)\n output = aug_list(input)\n if aug_list.return_label:\n output, label = output\n assert (output[:, :, 0] == output[:, :, 1]).all()\n assert (output[:, :, 1] == output[:, :, 2]).all()\n assert (output[:, :, 2] == output[:, :, 3]).all()\n if data_format == 'BTCHW':\n input = torch.randn(2, 1, 3, 5, 6, device=device, dtype=dtype).repeat(1, 4, 1, 1, 1)\n output = aug_list(input)\n if aug_list.return_label:\n output, label = output\n assert (output[:, 0] == output[:, 1]).all()\n assert (output[:, 1] == output[:, 2]).all()\n assert (output[:, 2] == output[:, 3]).all()\n reproducibility_test(input, aug_list)\n\n @pytest.mark.parametrize(\n 'augmentations',\n [\n [K.RandomAffine(360, p=1.0)],\n [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0)],\n [K.RandomAffine(360, p=0.0), K.ImageSequential(K.RandomAffine(360, p=0.0))],\n ],\n )\n @pytest.mark.parametrize('data_format', [\"BCTHW\", \"BTCHW\"])\n def test_against_sequential(self, augmentations, data_format, device, dtype):\n aug_list_1 = K.VideoSequential(*augmentations, data_format=data_format, same_on_frame=False)\n aug_list_2 = torch.nn.Sequential(*augmentations)\n\n if data_format == 'BCTHW':\n input = torch.randn(2, 3, 1, 5, 6, device=device, dtype=dtype).repeat(1, 1, 4, 1, 1)\n if data_format == 'BTCHW':\n input = torch.randn(2, 1, 3, 5, 6, device=device, dtype=dtype).repeat(1, 4, 1, 1, 1)\n\n torch.manual_seed(0)\n output_1 = aug_list_1(input)\n\n torch.manual_seed(0)\n if data_format == 'BCTHW':\n input = input.transpose(1, 2)\n output_2 = aug_list_2(input.reshape(-1, 3, 5, 6))\n output_2 = output_2.view(2, 4, 3, 5, 6)\n if data_format == 'BCTHW':\n output_2 = output_2.transpose(1, 2)\n assert (output_1 == output_2).all(), dict(aug_list_1._params)\n\n @pytest.mark.jit\n @pytest.mark.skip(reason=\"turn off due to Union Type\")\n def test_jit(self, device, dtype):\n B, C, D, H, W = 2, 3, 5, 4, 4\n img = torch.ones(B, C, D, H, W, device=device, dtype=dtype)\n op = K.VideoSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1), same_on_frame=True)\n op_jit = torch.jit.script(op)\n assert_close(op(img), op_jit(img))\n\n\nclass TestSequential:\n @pytest.mark.parametrize('same_on_batch', [True, False, None])\n @pytest.mark.parametrize(\"return_transform\", [True, False, None])\n @pytest.mark.parametrize(\"keepdim\", [True, False, None])\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 20, True, False])\n def test_construction(self, same_on_batch, return_transform, keepdim, random_apply):\n aug = K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),\n K.RandomAffine(360, p=1.0),\n K.RandomMixUp(p=1.0),\n same_on_batch=same_on_batch,\n return_transform=return_transform,\n keepdim=keepdim,\n random_apply=random_apply,\n )\n c = 0\n for a in aug.get_forward_sequence():\n if isinstance(a, (MixAugmentationBase,)):\n c += 1\n assert c < 2\n aug.same_on_batch = True\n aug.return_transform = True\n aug.keepdim = True\n for m in aug.children():\n assert m.same_on_batch is True, m.same_on_batch\n if not isinstance(m, (MixAugmentationBase,)):\n assert m.return_transform is True, m.return_transform\n assert m.keepdim is True, m.keepdim\n\n @pytest.mark.parametrize(\"return_transform\", [True, False, None])\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 10, 
True, False])\n def test_forward(self, return_transform, random_apply, device, dtype):\n inp = torch.randn(1, 3, 30, 30, device=device, dtype=dtype)\n aug = K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),\n kornia.filters.MedianBlur((3, 3)),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),\n K.RandomAffine(360, p=1.0),\n K.RandomMixUp(p=1.0),\n return_transform=return_transform,\n random_apply=random_apply,\n )\n out = aug(inp)\n if aug.return_label:\n out, label = out\n if isinstance(out, (tuple,)):\n assert out[0].shape == inp.shape\n else:\n assert out.shape == inp.shape\n reproducibility_test(inp, aug)\n\n\nclass TestAugmentationSequential:\n @pytest.mark.parametrize(\n 'data_keys', [\"input\", [\"mask\", \"input\"], [\"input\", \"bbox_yxyx\"], [0, 10], [BorderType.REFLECT]]\n )\n @pytest.mark.parametrize(\"augmentation_list\", [K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0)])\n def test_exception(self, augmentation_list, data_keys, device, dtype):\n with pytest.raises(Exception): # AssertError and NotImplementedError\n K.AugmentationSequential(augmentation_list, data_keys=data_keys)\n\n @pytest.mark.parametrize('return_transform', [True, False])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 10, True, False])\n @pytest.mark.parametrize('inp', [torch.randn(1, 3, 1000, 500), torch.randn(3, 1000, 500)])\n def test_mixup(self, inp, return_transform, random_apply, same_on_batch, device, dtype):\n inp = torch.as_tensor(inp, device=device, dtype=dtype)\n aug = K.AugmentationSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),\n K.RandomAffine(360, p=1.0),\n K.RandomMixUp(p=1.0),\n data_keys=[\"input\"],\n random_apply=random_apply,\n return_transform=return_transform,\n same_on_batch=same_on_batch,\n )\n out = aug(inp)\n if aug.return_label:\n out, label = out\n if return_transform and isinstance(out, (tuple, list)):\n out = out[0]\n assert out.shape[-3:] == inp.shape[-3:]\n reproducibility_test(inp, aug)\n\n def test_random_flips(self, device, dtype):\n inp = torch.randn(1, 3, 510, 1020, device=device, dtype=dtype)\n bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)\n\n expected_bbox_vertical_flip = torch.tensor(\n [[[355, 499], [660, 499], [660, 259], [355, 259]]], device=device, dtype=dtype\n )\n expected_bbox_horizontal_flip = torch.tensor(\n [[[664, 10], [359, 10], [359, 250], [664, 250]]], device=device, dtype=dtype\n )\n\n aug_ver = K.AugmentationSequential(\n K.RandomVerticalFlip(p=1.0), data_keys=[\"input\", \"bbox\"], return_transform=False, same_on_batch=False\n )\n\n aug_hor = K.AugmentationSequential(\n K.RandomHorizontalFlip(p=1.0), data_keys=[\"input\", \"bbox\"], return_transform=False, same_on_batch=False\n )\n\n out_ver = aug_ver(inp, bbox)\n out_hor = aug_hor(inp, bbox)\n\n assert_close(out_ver[1], expected_bbox_vertical_flip)\n assert_close(out_hor[1], expected_bbox_horizontal_flip)\n\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 10, True, False])\n @pytest.mark.parametrize('return_transform', [True, False])\n def test_forward_and_inverse(self, random_apply, return_transform, device, dtype):\n inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)\n bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)\n keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)\n mask = bbox_to_mask(\n torch.tensor([[[155, 0], [900, 0], 
[900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500\n )[:, None].float()\n aug = K.AugmentationSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0),\n K.RandomAffine(360, p=1.0),\n data_keys=[\"input\", \"mask\", \"bbox\", \"keypoints\"],\n random_apply=random_apply,\n return_transform=return_transform,\n )\n out = aug(inp, mask, bbox, keypoints)\n if return_transform and isinstance(out, (tuple, list)):\n assert out[0][0].shape == inp.shape\n else:\n assert out[0].shape == inp.shape\n assert out[1].shape == mask.shape\n assert out[2].shape == bbox.shape\n assert out[3].shape == keypoints.shape\n reproducibility_test((inp, mask, bbox, keypoints), aug)\n\n out_inv = aug.inverse(*out)\n assert out_inv[0].shape == inp.shape\n assert out_inv[1].shape == mask.shape\n assert out_inv[2].shape == bbox.shape\n assert out_inv[3].shape == keypoints.shape\n\n def test_individual_forward_and_inverse(self, device, dtype):\n inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)\n bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)\n keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)\n mask = bbox_to_mask(\n torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500\n )[:, None].float()\n\n aug = K.AugmentationSequential(\n K.RandomAffine(360, p=1.0, return_transform=False), data_keys=['input', 'mask', 'bbox', 'keypoints']\n )\n reproducibility_test((inp, mask, bbox, keypoints), aug)\n\n aug = K.AugmentationSequential(K.RandomAffine(360, p=1.0, return_transform=True))\n assert aug(inp, data_keys=['input'])[0].shape == inp.shape\n aug = K.AugmentationSequential(K.RandomAffine(360, p=1.0, return_transform=False))\n assert aug(inp, data_keys=['input']).shape == inp.shape\n assert aug(mask, data_keys=['mask'], params=aug._params).shape == mask.shape\n\n assert aug.inverse(inp, data_keys=['input']).shape == inp.shape\n assert aug.inverse(bbox, data_keys=['bbox']).shape == bbox.shape\n assert aug.inverse(keypoints, data_keys=['keypoints']).shape == keypoints.shape\n assert aug.inverse(mask, data_keys=['mask']).shape == mask.shape\n\n @pytest.mark.parametrize('random_apply', [2, (1, 1), (2,), 10, True, False])\n def test_forward_and_inverse_return_transform(self, random_apply, device, dtype):\n inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)\n bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)\n keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)\n mask = bbox_to_mask(\n torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500\n )[:, None].float()\n aug = K.AugmentationSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),\n K.RandomAffine(360, p=1.0, return_transform=True),\n data_keys=[\"input\", \"mask\", \"bbox\", \"keypoints\"],\n random_apply=random_apply,\n )\n out = aug(inp, mask, bbox, keypoints)\n assert out[0][0].shape == inp.shape\n assert out[1].shape == mask.shape\n assert out[2].shape == bbox.shape\n assert out[3].shape == keypoints.shape\n\n reproducibility_test((inp, mask, bbox, keypoints), aug)\n\n out_inv = aug.inverse(*out)\n assert out_inv[0].shape == inp.shape\n assert out_inv[1].shape == mask.shape\n assert out_inv[2].shape == bbox.shape\n assert out_inv[3].shape == keypoints.shape\n\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 10, True, False])\n def 
test_inverse_and_forward_return_transform(self, random_apply, device, dtype):\n inp = torch.randn(1, 3, 1000, 500, device=device, dtype=dtype)\n bbox = torch.tensor([[[355, 10], [660, 10], [660, 250], [355, 250]]], device=device, dtype=dtype)\n keypoints = torch.tensor([[[465, 115], [545, 116]]], device=device, dtype=dtype)\n mask = bbox_to_mask(\n torch.tensor([[[155, 0], [900, 0], [900, 400], [155, 400]]], device=device, dtype=dtype), 1000, 500\n )[:, None].float()\n aug = K.AugmentationSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0, return_transform=True),\n K.RandomAffine(360, p=1.0, return_transform=True),\n data_keys=[\"input\", \"mask\", \"bbox\", \"keypoints\"],\n random_apply=random_apply,\n )\n with pytest.raises(Exception): # No parameters available for inversing.\n aug.inverse(inp, mask, bbox, keypoints)\n\n out = aug(inp, mask, bbox, keypoints)\n assert out[0][0].shape == inp.shape\n assert out[1].shape == mask.shape\n assert out[2].shape == bbox.shape\n assert out[3].shape == keypoints.shape\n\n reproducibility_test((inp, mask, bbox, keypoints), aug)\n\n @pytest.mark.jit\n @pytest.mark.skip(reason=\"turn off due to Union Type\")\n def test_jit(self, device, dtype):\n B, C, H, W = 2, 3, 4, 4\n img = torch.ones(B, C, H, W, device=device, dtype=dtype)\n op = K.AugmentationSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=1.0), K.RandomAffine(360, p=1.0), same_on_batch=True\n )\n op_jit = torch.jit.script(op)\n assert_close(op(img), op_jit(img))\n\n\nclass TestPatchSequential:\n @pytest.mark.parametrize(\n 'error_param',\n [\n {\"random_apply\": False, \"patchwise_apply\": True, \"grid_size\": (2, 3)},\n {\"random_apply\": 2, \"patchwise_apply\": True},\n {\"random_apply\": (2, 3), \"patchwise_apply\": True},\n ],\n )\n def test_exception(self, error_param):\n with pytest.raises(Exception): # AssertError and NotImplementedError\n K.PatchSequential(\n K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.RandomPerspective(0.2, p=0.5),\n K.RandomSolarize(0.1, 0.1, p=0.5),\n ),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.RandomPerspective(0.2, p=0.5),\n K.RandomSolarize(0.1, 0.1, p=0.5),\n ),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n **error_param,\n )\n\n @pytest.mark.parametrize('shape', [(2, 3, 24, 24)])\n @pytest.mark.parametrize('padding', [\"same\", \"valid\"])\n @pytest.mark.parametrize('patchwise_apply', [True, False])\n @pytest.mark.parametrize('same_on_batch', [True, False, None])\n @pytest.mark.parametrize('keepdim', [True, False, None])\n @pytest.mark.parametrize('random_apply', [1, (2, 2), (1, 2), (2,), 10, True, False])\n def test_forward(self, shape, padding, patchwise_apply, same_on_batch, keepdim, random_apply, device, dtype):\n torch.manual_seed(11)\n try: # skip wrong param settings.\n seq = K.PatchSequential(\n kornia.color.RgbToBgr(),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.RandomPerspective(0.2, p=0.5),\n K.RandomSolarize(0.1, 0.1, p=0.5),\n ),\n K.RandomMixUp(p=1.0),\n grid_size=(2, 2),\n padding=padding,\n patchwise_apply=patchwise_apply,\n same_on_batch=same_on_batch,\n keepdim=keepdim,\n random_apply=random_apply,\n )\n except:\n return\n\n input = torch.randn(*shape, device=device, dtype=dtype)\n out = seq(input)\n if seq.return_label:\n out, label = out\n assert out.shape[-3:] == input.shape[-3:]\n\n reproducibility_test(input, seq)\n\n def test_intensity_only(self):\n seq = K.PatchSequential(\n 
K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.RandomPerspective(0.2, p=0.5),\n K.RandomSolarize(0.1, 0.1, p=0.5),\n ),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n K.ImageSequential(\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.RandomPerspective(0.2, p=0.5),\n K.RandomSolarize(0.1, 0.1, p=0.5),\n ),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n grid_size=(2, 2),\n )\n assert not seq.is_intensity_only()\n\n seq = K.PatchSequential(\n K.ImageSequential(K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5)),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.5),\n K.ColorJitter(0.1, 0.1, 0.1, 0.1),\n grid_size=(2, 2),\n )\n assert seq.is_intensity_only()\n"
] | [
[
"torch.stack",
"torch.eye",
"torch.zeros_like",
"torch.zeros"
],
[
"torch.jit.script",
"torch.nn.Sequential",
"torch.ones",
"torch.randint",
"torch.manual_seed",
"torch.randn",
"torch.tensor",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |