repo_name (string, 8–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
ChAnYaNG97/datasets | [
"0a45e2ea98716d325fc1c5e5494f2575f3bdb908",
"0a45e2ea98716d325fc1c5e5494f2575f3bdb908"
] | [
"tensorflow_datasets/object_detection/voc.py",
"tensorflow_datasets/summarization/opinion_abstracts.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PASCAL VOC datasets.\"\"\"\n\nimport os\nimport xml.etree.ElementTree\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n\n_VOC_CITATION = \"\"\"\\\n@misc{{pascal-voc-{year},\n\tauthor = \"Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.\",\n\ttitle = \"The {{PASCAL}} {{V}}isual {{O}}bject {{C}}lasses {{C}}hallenge {year} {{(VOC{year})}} {{R}}esults\",\n\thowpublished = \"http://www.pascal-network.org/challenges/VOC/voc{year}/workshop/index.html\"}}\n\"\"\"\n_VOC_DESCRIPTION = \"\"\"\\\nThis dataset contains the data from the PASCAL Visual Object Classes Challenge\n{year}, a.k.a. VOC{year}, corresponding to the Classification and Detection\ncompetitions.\nA total of {num_images} images are included in this dataset, where each image\ncontains a set of objects, out of 20 different classes, making a total of\n{num_objects} annotated objects.\nIn the Classification competition, the goal is to predict the set of labels\ncontained in the image, while in the Detection competition the goal is to\npredict the bounding box and label of each individual object.\nWARNING: As per the official dataset, the test set of VOC2012 does not contain\nannotations.\n\"\"\"\n_VOC_URL = \"http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/\"\n# Original site, it is down very often.\n# _VOC_DATA_URL = \"http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/\"\n# Data mirror:\n_VOC_DATA_URL = \"http://pjreddie.com/media/files/\"\n_VOC_LABELS = (\n \"aeroplane\",\n \"bicycle\",\n \"bird\",\n \"boat\",\n \"bottle\",\n \"bus\",\n \"car\",\n \"cat\",\n \"chair\",\n \"cow\",\n \"diningtable\",\n \"dog\",\n \"horse\",\n \"motorbike\",\n \"person\",\n \"pottedplant\",\n \"sheep\",\n \"sofa\",\n \"train\",\n \"tvmonitor\",\n)\n_VOC_POSES = (\n \"frontal\",\n \"rear\",\n \"left\",\n \"right\",\n \"unspecified\",\n)\n\n\ndef _get_example_objects(annon_filepath):\n \"\"\"Function to get all the objects from the annotation XML file.\"\"\"\n with tf.io.gfile.GFile(annon_filepath, \"r\") as f:\n root = xml.etree.ElementTree.parse(f).getroot()\n\n # Disable pytype to avoid attribute-error due to find returning\n # Optional[Element]\n # pytype: disable=attribute-error\n size = root.find(\"size\")\n width = float(size.find(\"width\").text)\n height = float(size.find(\"height\").text)\n\n for obj in root.findall(\"object\"):\n # Get object's label name.\n label = obj.find(\"name\").text.lower()\n # Get objects' pose name.\n pose = obj.find(\"pose\").text.lower()\n is_truncated = (obj.find(\"truncated\").text == \"1\")\n is_difficult = (obj.find(\"difficult\").text == \"1\")\n bndbox = obj.find(\"bndbox\")\n xmax = float(bndbox.find(\"xmax\").text)\n xmin = float(bndbox.find(\"xmin\").text)\n ymax = float(bndbox.find(\"ymax\").text)\n ymin = float(bndbox.find(\"ymin\").text)\n yield {\n \"label\": label,\n \"pose\": pose,\n \"bbox\": 
tfds.features.BBox(\n ymin / height, xmin / width, ymax / height, xmax / width),\n \"is_truncated\": is_truncated,\n \"is_difficult\": is_difficult,\n }\n # pytype: enable=attribute-error\n\n\nclass VocConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for Voc.\"\"\"\n\n def __init__(\n self, year=None, filenames=None, has_test_annotations=True, **kwargs):\n self.year = year\n self.filenames = filenames\n self.has_test_annotations = has_test_annotations\n super(VocConfig, self).__init__(\n name=year,\n # Version history:\n # 4.0.0: Added BuildConfig and 2012 version support, deprecate Voc2007.\n # 3.0.0: S3 with new hashing function (different shuffle).\n # 2.0.0: S3 (new shuffling, sharding and slicing mechanism).\n version=tfds.core.Version(\"4.0.0\"),\n **kwargs)\n\n\nclass Voc(tfds.core.GeneratorBasedBuilder):\n \"\"\"Pascal VOC 2007 or 2012.\"\"\"\n\n BUILDER_CONFIGS = [\n VocConfig(\n year=\"2007\",\n description=_VOC_DESCRIPTION.format(\n year=2007, num_images=9963, num_objects=24640),\n filenames={\n \"trainval\": \"VOCtrainval_06-Nov-2007.tar\",\n \"test\": \"VOCtest_06-Nov-2007.tar\",\n },\n has_test_annotations=True,\n ),\n VocConfig(\n year=\"2012\",\n description=_VOC_DESCRIPTION.format(\n year=2012, num_images=11540, num_objects=27450),\n filenames={\n \"trainval\": \"VOCtrainval_11-May-2012.tar\",\n \"test\": \"VOC2012test.tar\",\n },\n has_test_annotations=False,\n ),\n ]\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=self.builder_config.description,\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(),\n \"image/filename\": tfds.features.Text(),\n \"objects\": tfds.features.Sequence({\n \"label\": tfds.features.ClassLabel(names=_VOC_LABELS),\n \"bbox\": tfds.features.BBoxFeature(),\n \"pose\": tfds.features.ClassLabel(names=_VOC_POSES),\n \"is_truncated\": tf.bool,\n \"is_difficult\": tf.bool,\n }),\n \"labels\": tfds.features.Sequence(\n tfds.features.ClassLabel(names=_VOC_LABELS)),\n \"labels_no_difficult\": tfds.features.Sequence(\n tfds.features.ClassLabel(names=_VOC_LABELS)),\n }),\n homepage=_VOC_URL.format(year=self.builder_config.year),\n citation=_VOC_CITATION.format(year=self.builder_config.year),\n )\n\n def _split_generators(self, dl_manager):\n paths = dl_manager.download_and_extract({\n k: os.path.join(_VOC_DATA_URL, v)\n for k, v in self.builder_config.filenames.items()\n })\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs=dict(data_path=paths[\"test\"], set_name=\"test\")),\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs=dict(data_path=paths[\"trainval\"], set_name=\"train\")),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs=dict(data_path=paths[\"trainval\"], set_name=\"val\")),\n ]\n\n def _generate_examples(self, data_path, set_name):\n \"\"\"Yields examples.\"\"\"\n set_filepath = os.path.join(\n data_path,\n os.path.normpath(\"VOCdevkit/VOC{}/ImageSets/Main/{}.txt\".format(\n self.builder_config.year, set_name)))\n load_annotations = (\n self.builder_config.has_test_annotations or set_name != \"test\")\n with tf.io.gfile.GFile(set_filepath, \"r\") as f:\n for line in f:\n image_id = line.strip()\n example = self._generate_example(data_path, image_id, load_annotations)\n yield image_id, example\n\n def _generate_example(self, data_path, image_id, load_annotations):\n image_filepath = os.path.join(\n data_path,\n os.path.normpath(\"VOCdevkit/VOC{}/JPEGImages/{}.jpg\".format(\n self.builder_config.year, image_id)))\n 
annon_filepath = os.path.join(\n data_path,\n os.path.normpath(\"VOCdevkit/VOC{}/Annotations/{}.xml\".format(\n self.builder_config.year, image_id)))\n if load_annotations:\n objects = list(_get_example_objects(annon_filepath))\n # Use set() to remove duplicates\n labels = sorted(set(obj[\"label\"] for obj in objects))\n labels_no_difficult = sorted(set(\n obj[\"label\"] for obj in objects if obj[\"is_difficult\"] == 0\n ))\n else: # The test set of VOC2012 does not contain annotations\n objects = []\n labels = []\n labels_no_difficult = []\n return {\n \"image\": image_filepath,\n \"image/filename\": image_id + \".jpg\",\n \"objects\": objects,\n \"labels\": labels,\n \"labels_no_difficult\": labels_no_difficult,\n }\n",
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Opinion Abstracts Dataset.\"\"\"\n\nimport json\nimport os\nfrom typing import Any, Dict, Iterator, List, Text, Tuple\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\n@inproceedings{wang-ling-2016-neural,\n title = \"Neural Network-Based Abstract Generation for Opinions and Arguments\",\n author = \"Wang, Lu and\n Ling, Wang\",\n booktitle = \"Proceedings of the 2016 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies\",\n month = jun,\n year = \"2016\",\n address = \"San Diego, California\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/N16-1007\",\n doi = \"10.18653/v1/N16-1007\",\n pages = \"47--57\",\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\nThere are two sub datasets:\n\n(1) RottenTomatoes: The movie critics and consensus crawled from\nhttp://rottentomatoes.com/. It has fields of \"_movie_name\", \"_movie_id\",\n\"_critics\", and \"_critic_consensus\".\n\n(2) IDebate: The arguments crawled from http://idebate.org/. It has fields of\n\"_debate_name\", \"_debate_id\", \"_claim\", \"_claim_id\", \"_argument_sentences\".\n\n\"\"\"\n\n_URL = \"http://www.ccs.neu.edu/home/luwang/datasets/opinion_abstracts.zip\"\n\n\nclass OpinionAbstractsConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for OpinionAbstracts.\"\"\"\n\n def __init__(\n self,\n *,\n filename: Text = None,\n name_key: Text = None,\n id_key: Text = None,\n opinions_key: Text = None,\n summary_key: Text = None,\n **kwargs\n ):\n \"\"\"BuilderConfig for OpinionAbstracts.\"\"\"\n super(OpinionAbstractsConfig, self).__init__(\n version=tfds.core.Version(\"1.0.0\"), **kwargs)\n self.filename = filename\n self.name_key = name_key\n self.id_key = id_key\n self.opinions_key = opinions_key\n self.summary_key = summary_key\n\n\nclass OpinionAbstracts(tfds.core.GeneratorBasedBuilder):\n \"\"\"OpinionAbstracts Dataset Builder.\"\"\"\n\n VERSION = tfds.core.Version(\"1.0.0\")\n BUILDER_CONFIGS = [\n OpinionAbstractsConfig(\n name=\"rotten_tomatoes\",\n filename=\"rottentomatoes.json\",\n name_key=\"_movie_name\",\n id_key=\"_movie_id\",\n opinions_key=\"_critics\",\n summary_key=\"_critic_consensus\",\n description=\"Professional critics and consensus of 3,731 movies.\",\n ),\n OpinionAbstractsConfig(\n name=\"idebate\",\n filename=\"idebate.json\",\n name_key=\"_debate_name\",\n id_key=\"_claim_id\",\n opinions_key=\"_argument_sentences\",\n summary_key=\"_claim\",\n description=\"2,259 claims for 676 debates.\",\n )\n ]\n\n def _info(self) -> tfds.core.DatasetInfo:\n config = self.builder_config\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n config.name_key:\n tf.string,\n config.id_key:\n tf.string,\n config.summary_key:\n tf.string,\n config.opinions_key:\n 
tfds.features.Sequence(\n tfds.features.FeaturesDict({\n \"key\": tf.string,\n \"value\": tf.string\n })),\n }),\n supervised_keys=(config.opinions_key, config.summary_key),\n homepage=\"http://www.ccs.neu.edu/home/luwang/data.html\",\n citation=_CITATION,\n )\n\n def _split_generators(\n self, dl_manager: tfds.download.DownloadManager\n ) -> List[tfds.core.SplitGenerator]:\n \"\"\"Returns SplitGenerators.\"\"\"\n dl_path = dl_manager.download_and_extract(_URL)\n path = os.path.join(dl_path, \"opinion_abstracts\",\n self.builder_config.filename)\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\"path\": path},\n ),\n ]\n\n def _generate_examples(self,\n path: Text = None\n ) -> Iterator[Tuple[Text, Dict[Text, Any]]]:\n \"\"\"Yields examples.\"\"\"\n with tf.io.gfile.GFile(path, \"rb\") as f:\n for example in json.load(f):\n config = self.builder_config\n opinions = example[config.opinions_key].items()\n opinions = [{\"key\": k, \"value\": v} for k, v in opinions]\n features = {config.opinions_key: opinions}\n for k in [config.name_key, config.id_key, config.summary_key]:\n features[k] = example[k]\n yield example[config.id_key], features\n"
] | [
[
"tensorflow.compat.v2.io.gfile.GFile"
],
[
"tensorflow.compat.v2.io.gfile.GFile"
]
] |
codeproject/DeepStack | [
"d96368a3db1bc0266cb500ba3701d130834da0e6",
"d96368a3db1bc0266cb500ba3701d130834da0e6"
] | [
"windows_packages_gpu/torch/nn/grad.py",
"windows_packages_gpu/torch/testing/_internal/jit_metaprogramming_utils.py"
] | [
"\"\"\"Gradient interface\"\"\"\r\n\r\nimport torch\r\nfrom .modules.utils import _single, _pair, _triple\r\nimport warnings\r\n\r\n\r\ndef _grad_input_padding(grad_output, input_size, stride, padding, kernel_size, dilation=None):\r\n if dilation is None:\r\n # For backward compatibility\r\n warnings.warn(\"_grad_input_padding 'dilation' argument not provided. Default of 1 is used.\")\r\n dilation = [1] * len(stride)\r\n\r\n input_size = list(input_size)\r\n k = grad_output.dim() - 2\r\n\r\n if len(input_size) == k + 2:\r\n input_size = input_size[-k:]\r\n if len(input_size) != k:\r\n raise ValueError(\"input_size must have {} elements (got {})\"\r\n .format(k + 2, len(input_size)))\r\n\r\n def dim_size(d):\r\n return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] + 1\r\n + dilation[d] * (kernel_size[d] - 1))\r\n\r\n min_sizes = [dim_size(d) for d in range(k)]\r\n max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]\r\n for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):\r\n if size < min_size or size > max_size:\r\n raise ValueError(\r\n (\"requested an input grad size of {}, but valid sizes range \"\r\n \"from {} to {} (for a grad_output of {})\").format(\r\n input_size, min_sizes, max_sizes,\r\n grad_output.size()[2:]))\r\n\r\n return tuple(input_size[d] - min_sizes[d] for d in range(k))\r\n\r\n\r\ndef conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv1d with respect to the input of the convolution.\r\n This is same as the 1D transposed convolution operator under the hood but requires\r\n the shape of the gradient w.r.t. input to be specified explicitly.\r\n\r\n Args:\r\n input_size : Shape of the input gradient tensor\r\n weight: weight tensor (out_channels x in_channels/groups x kW)\r\n grad_output : output gradient tensor (minibatch x out_channels x oW)\r\n stride (int or tuple, optional): Stride of the convolution. Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(1,1,3, requires_grad=True)\r\n >>> weight = torch.randn(1,1,1, requires_grad=True)\r\n >>> output = F.conv1d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\r\n >>> F.grad.conv1d_input(input.shape, weight, grad_output)\r\n\r\n \"\"\"\r\n stride = _single(stride)\r\n padding = _single(padding)\r\n dilation = _single(dilation)\r\n kernel_size = [weight.shape[2]]\r\n\r\n if input_size is None:\r\n raise ValueError(\"grad.conv1d_input requires specifying an input_size\")\r\n\r\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\r\n padding, kernel_size, dilation)\r\n\r\n return torch.conv_transpose1d(\r\n grad_output, weight, None, stride, padding, grad_input_padding, groups,\r\n dilation)\r\n\r\n\r\ndef conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv1d with respect to the weight of the convolution.\r\n\r\n Args:\r\n input: input tensor of shape (minibatch x in_channels x iW)\r\n weight_size : Shape of the weight gradient tensor\r\n grad_output : output gradient tensor (minibatch x out_channels x oW)\r\n stride (int or tuple, optional): Stride of the convolution. Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(1,1,3, requires_grad=True)\r\n >>> weight = torch.randn(1,1,1, requires_grad=True)\r\n >>> output = F.conv1d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\r\n >>> F.grad.conv1d_weight(input, weight.shape, grad_output)\r\n\r\n \"\"\"\r\n stride = _single(stride)\r\n padding = _single(padding)\r\n dilation = _single(dilation)\r\n in_channels = input.shape[1]\r\n out_channels = grad_output.shape[1]\r\n min_batch = input.shape[0]\r\n\r\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)\r\n grad_output = grad_output.contiguous().view(\r\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])\r\n\r\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\r\n input.shape[2])\r\n\r\n grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,\r\n stride, in_channels * min_batch)\r\n\r\n grad_weight = grad_weight.contiguous().view(\r\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])\r\n\r\n return grad_weight.sum(dim=0).view(\r\n in_channels // groups, out_channels, grad_weight.shape[2]).transpose(\r\n 0, 1).narrow(2, 0, weight_size[2])\r\n\r\n\r\ndef conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv2d with respect to the input of the convolution.\r\n This is same as the 2D transposed convolution operator under the hood but requires\r\n the shape of the gradient w.r.t. input to be specified explicitly.\r\n\r\n Args:\r\n input_size : Shape of the input gradient tensor\r\n weight: weight tensor (out_channels x in_channels/groups x kH x kW)\r\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\r\n stride (int or tuple, optional): Stride of the convolution. 
Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\r\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\r\n >>> output = F.conv2d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\r\n >>> F.grad.conv2d_input(input.shape, weight, grad_output)\r\n\r\n \"\"\"\r\n stride = _pair(stride)\r\n padding = _pair(padding)\r\n dilation = _pair(dilation)\r\n kernel_size = (weight.shape[2], weight.shape[3])\r\n\r\n if input_size is None:\r\n raise ValueError(\"grad.conv2d_input requires specifying an input_size\")\r\n\r\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\r\n padding, kernel_size, dilation)\r\n\r\n return torch.conv_transpose2d(\r\n grad_output, weight, None, stride, padding, grad_input_padding, groups,\r\n dilation)\r\n\r\n\r\ndef conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv2d with respect to the weight of the convolution.\r\n\r\n Args:\r\n input: input tensor of shape (minibatch x in_channels x iH x iW)\r\n weight_size : Shape of the weight gradient tensor\r\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\r\n stride (int or tuple, optional): Stride of the convolution. Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\r\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\r\n >>> output = F.conv2d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\r\n >>> F.grad.conv2d_weight(input, weight.shape, grad_output)\r\n\r\n \"\"\"\r\n stride = _pair(stride)\r\n padding = _pair(padding)\r\n dilation = _pair(dilation)\r\n in_channels = input.shape[1]\r\n out_channels = grad_output.shape[1]\r\n min_batch = input.shape[0]\r\n\r\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,\r\n 1)\r\n grad_output = grad_output.contiguous().view(\r\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\r\n grad_output.shape[3])\r\n\r\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\r\n input.shape[2], input.shape[3])\r\n\r\n grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,\r\n stride, in_channels * min_batch)\r\n\r\n grad_weight = grad_weight.contiguous().view(\r\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\r\n grad_weight.shape[3])\r\n\r\n return grad_weight.sum(dim=0).view(\r\n in_channels // groups, out_channels,\r\n grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(\r\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3])\r\n\r\n\r\ndef conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv3d with respect to the input of the convolution.\r\n This is same as the 3D transposed convolution operator under the hood but requires\r\n the shape of the gradient w.r.t. input to be specified explicitly.\r\n\r\n Args:\r\n input_size : Shape of the input gradient tensor\r\n weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)\r\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\r\n stride (int or tuple, optional): Stride of the convolution. Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\r\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\r\n >>> output = F.conv3d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\r\n >>> F.grad.conv3d_input(input.shape, weight, grad_output)\r\n\r\n \"\"\"\r\n stride = _triple(stride)\r\n padding = _triple(padding)\r\n dilation = _triple(dilation)\r\n kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])\r\n\r\n if input_size is None:\r\n raise ValueError(\"grad.conv3d_input requires specifying an input_size\")\r\n\r\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\r\n padding, kernel_size, dilation)\r\n\r\n return torch.conv_transpose3d(\r\n grad_output, weight, None, stride, padding, grad_input_padding, groups,\r\n dilation)\r\n\r\n\r\ndef conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):\r\n r\"\"\"\r\n Computes the gradient of conv3d with respect to the weight of the convolution.\r\n\r\n Args:\r\n input: input tensor of shape (minibatch x in_channels x iT x iH x iW)\r\n weight_size : Shape of the weight gradient tensor\r\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\r\n stride (int or tuple, optional): Stride of the convolution. Default: 1\r\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\r\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\r\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\r\n\r\n Examples::\r\n\r\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\r\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\r\n >>> output = F.conv3d(input, weight)\r\n >>> grad_output = torch.randn(output.shape)\r\n >>> grad_weight = torch.autograd.grad(output, weight, grad_output)\r\n >>> F.grad.conv3d_weight(input, weight.shape, grad_output)\r\n\r\n \"\"\"\r\n stride = _triple(stride)\r\n padding = _triple(padding)\r\n dilation = _triple(dilation)\r\n in_channels = input.shape[1]\r\n out_channels = grad_output.shape[1]\r\n min_batch = input.shape[0]\r\n\r\n grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)\r\n grad_output = grad_output.contiguous().view(\r\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\r\n grad_output.shape[3], grad_output.shape[4])\r\n\r\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\r\n input.shape[2], input.shape[3],\r\n input.shape[4])\r\n\r\n grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,\r\n stride, in_channels * min_batch)\r\n\r\n grad_weight = grad_weight.contiguous().view(\r\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\r\n grad_weight.shape[3], grad_weight.shape[4])\r\n\r\n return grad_weight.sum(dim=0).view(\r\n in_channels // groups, out_channels, grad_weight.shape[2],\r\n grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(\r\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(\r\n 4, 0, weight_size[4])\r\n",
"# Torch\r\nfrom torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401\r\nfrom torch.testing._internal.common_methods_invocations import non_differentiable, create_input, \\\r\n unpack_variables\r\nimport torch.nn.functional as F\r\nimport torch\r\nimport torch.cuda\r\nimport torch.jit\r\nimport torch.jit._logging\r\nimport torch.jit.frontend\r\nfrom torch.testing._internal.common_nn import module_tests, new_module_tests\r\nfrom copy import deepcopy\r\nimport math # noqa: F401\r\n\r\n# Testing utils\r\nfrom torch._six import inf\r\ntorch.set_default_dtype(torch.double)\r\n\r\nL = 20\r\nM = 10\r\nS = 5\r\n\r\n# NB: JIT script tests for all nn functional interfaces, script mode does\r\n# not support in_place operations yet, so no inplace operation tests added.\r\n# removed all the deprecated functions\r\n#\r\n# (\r\n# method name,\r\n# input size/constructing fn,\r\n# args (tuple represents shape of a tensor arg),\r\n# test variant name(will be used at test name suffix,\r\n# 'inplace' skips grad tests), // optional\r\n# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional\r\n# fn to determine if test should be skipped, // optional\r\n# fn mapping output to part that should be gradcheck'ed, // optional\r\n# kwargs for function, // optional\r\n# )\r\nnn_functional_tests = [\r\n ('conv1d', (S, S, S), ((S, S, S),)),\r\n ('conv2d', (S, S, S, S), ((S, S, S, S),)),\r\n ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),\r\n ('conv_transpose1d', (S, S, S), ((S, S, S),)),\r\n ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),\r\n ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),\r\n ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),\r\n ('avg_pool1d', (S, S, S), (3,)),\r\n ('avg_pool2d', (S, S, S, S), (3,), '', (True,)),\r\n ('avg_pool3d', (S, S, S, S, S), (3,)),\r\n ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),\r\n ('max_pool1d', (S, S, S), (2, 1)),\r\n ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),\r\n ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),\r\n ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),\r\n ('max_pool3d', (S, S, S, S, S), (2, 1)),\r\n ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),\r\n ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),\r\n ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),\r\n ('lp_pool1d', (S, S, S), (2., 3, 2,)),\r\n ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),\r\n ('adaptive_max_pool1d', (S, S, S), (5,)),\r\n ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),\r\n ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),\r\n ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),\r\n ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),\r\n ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),\r\n ('dropout', (S, S, S), (0.5,), '', (True,\r\n ['aten::bernoulli_',\r\n 'aten::empty_like', 'aten::mul', 'aten::div'])),\r\n ('alpha_dropout', (S, S, S), (0.5,)),\r\n ('dropout2d', (S, S, S), (0.5,)),\r\n ('dropout3d', (S, S, S), (0.5,)),\r\n ('feature_alpha_dropout', (S, S, S), (0.5,)),\r\n ('threshold', (S, S, S), (0.1, 2.), '', (True,)),\r\n ('threshold', (S, S, S), (0.1, 2., True), 'inplace'),\r\n ('relu', (S, S, S), (), '', (True,)),\r\n ('relu', (S, S, S), (), 'inplace'),\r\n ('glu', (S - 1, S - 1, S - 1), (),),\r\n ('hardtanh', (S, S, S), (-0.5, 0.5),),\r\n ('hardtanh', (S, 
S, S), (-0.5, 0.5, True), 'inplace'),\r\n ('relu6', (S, S, S), (),),\r\n ('relu6', (S, S, S), (True), 'inplace'),\r\n ('elu', (S, S, S), (0.9,),),\r\n ('elu', (S, S, S), (0.9, True), 'inplace'),\r\n ('selu', (S, S, S), (),),\r\n ('selu', (S, S, S), (True), 'inplace'),\r\n ('celu', (S, S, S), (0.9,),),\r\n ('celu', (S, S, S), (0.9, True), 'inplace'),\r\n ('leaky_relu', (S, S, S), (0.02,),),\r\n ('leaky_relu', (S, S, S), (0.02,), 'inplace'),\r\n ('rrelu', (S, S), (0.1, 0.3, False),),\r\n ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),\r\n ('hardshrink', (S, S, S), (0.4,),),\r\n ('tanhshrink', (S, S, S), (),),\r\n ('softsign', (S, S, S), (),),\r\n ('softplus', (S, S, S), (),),\r\n ('softmin', (S, S, S), (0,),),\r\n ('softmax', (S, S, S), (0,), '', (True,)),\r\n ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),\r\n ('tanh', (S, S, S), (), '', (True,)),\r\n ('sigmoid', (S, S, S), (), '', (True,)),\r\n ('log_softmax', (S, S, S), (0,), '', (True,)),\r\n ('linear', (S, S), ((M, S),), '', (True, ['aten::t', 'aten::matmul'])),\r\n ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::add', 'aten::mm'])),\r\n ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),\r\n ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),\r\n ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),\r\n ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), ),\r\n '', (False, 'aten::_batch_norm_impl_index')),\r\n ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),\r\n ('layer_norm', (S, S, S, S), ([5],), '',\r\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\r\n ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',\r\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\r\n ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',\r\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\r\n ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),\r\n non_differentiable(torch.rand(S))), 'with_weight_and_bias',\r\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),\r\n ('group_norm', (S, S, S), (1, torch.rand(5),),),\r\n ('local_response_norm', (S, S, S), (2, ),),\r\n ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '', (True, 'aten::nll_loss_forward')),\r\n ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),\r\n ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),\r\n ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),\r\n ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),\r\n ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),\r\n ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\r\n ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\r\n ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\r\n ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\r\n ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\r\n ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\r\n ('margin_ranking_loss', (3, S), ((3, S), (S,)),),\r\n ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\r\n ('soft_margin_loss', (3, S), 
(non_differentiable(torch.rand(3, S)),),),\r\n ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\r\n ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),\r\n ('pixel_shuffle', (1, 9, 4, 4), (3,),),\r\n ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),\r\n ('pad', (3, 3, 4, 2), ([1, 1],),),\r\n ('pairwise_distance', (S, S), ((S, S),),),\r\n ('pdist', (S, S), (),),\r\n ('cosine_similarity', (S, S), ((S, S),),),\r\n ('triplet_margin_loss', (S, S), ((S, S), (S, S)),),\r\n ('normalize', (S, S, S), (),),\r\n ('unfold', (S, S, S, S), ([2, 3]),),\r\n ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),\r\n ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),\r\n ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),\r\n ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),\r\n ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),\r\n ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),\r\n 1, 1., non_differentiable(torch.randn(S))),),\r\n ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),\r\n non_differentiable(torch.randn(3, 2))),),\r\n ('binary_cross_entropy', torch.randn(3, 2).sigmoid(),\r\n (non_differentiable(torch.rand(3, 2)),\r\n non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),\r\n ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),\r\n (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),\r\n torch.randint(1, S, (S,), dtype=torch.long))),\r\n ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),\r\n ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),\r\n ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),\r\n ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),\r\n ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),\r\n ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),\r\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),\r\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),\r\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),\r\n ('interpolate', torch.randn(S, M, M, 
M, M), (None, 2.), 'nearest_5d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),\r\n ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),\r\n ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),\r\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),\r\n 'nearest_4d_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),\r\n 'nearest_4d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),\r\n 'bilinear_4d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),\r\n 'bilinear_4d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),\r\n 'bicubic_4d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),\r\n 'bicubic_4d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),\r\n 'nearest_3d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),\r\n 'nearest_3d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),\r\n 'linear_3d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),\r\n 'linear_3d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),\r\n 'nearest_5d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),\r\n 'nearest_5d_with_size_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),\r\n 'trilinear_5d_with_scale_not_recompute_scale_factor'),\r\n ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),\r\n 'trilinear_5d_with_size_not_recompute_scale_factor'),\r\n]\r\n\r\nscript_template = '''\r\ndef the_method({}):\r\n return {}\r\n'''\r\n\r\ndef get_call(method_name, func_type, args, kwargs):\r\n kwargs_str = ', '.join([k + '=' + str(v) for k, v in kwargs.items()])\r\n self_arg = args[0]\r\n if(func_type == 'method'):\r\n args = args[1:]\r\n\r\n argument_str = ', '.join(args)\r\n argument_str += ', ' if len(args) and len(kwargs) else ''\r\n argument_str += kwargs_str\r\n\r\n if func_type == 'functional':\r\n call = 'torch.{}({})'.format(method_name, argument_str)\r\n elif func_type == 'method':\r\n call = '{}.{}({})'.format(self_arg, method_name, argument_str)\r\n elif func_type == 'nn_functional':\r\n call = 'torch.nn.functional.{}({})'.format(method_name, argument_str)\r\n else:\r\n raise 'Unsupported function type'\r\n\r\n return call\r\n\r\ndef get_constant(x):\r\n if x == inf:\r\n return 'math.inf'\r\n if x == -inf:\r\n return '-math.inf'\r\n return x\r\n\r\ndef get_script_args(args):\r\n formals = []\r\n tensors = []\r\n actuals = []\r\n for arg 
in args:\r\n if isinstance(arg, torch.Tensor):\r\n name = 'i{}'.format(len(formals))\r\n formals.append(name)\r\n actuals.append(name)\r\n tensors.append(arg)\r\n elif isinstance(arg, str):\r\n actuals.append(\"'{}'\".format(arg))\r\n else:\r\n actuals.append(str(get_constant(arg)))\r\n return (formals, tensors, actuals)\r\n\r\n# create a script function from (name, func_type, output_process_fn),\r\n# and returns the compiled function and example inputs\r\ndef gen_script_fn_and_args(method_name, func_type, *args, **kwargs):\r\n formals, tensors, actuals = get_script_args(args)\r\n call = get_call(method_name, func_type, actuals, kwargs)\r\n script = script_template.format(', '.join(formals), call)\r\n CU = torch.jit.CompilationUnit(script)\r\n return CU.the_method, tensors\r\n\r\n# create a script function from (name, func_type, output_process_fn),\r\n# returns a function takes in (args, kwargs) and runs the compiled function and\r\n# then applies the post process fn to the outputs\r\ndef create_script_fn(self, method_name, func_type, output_process_fn):\r\n def script_fn(*args, **kwargs):\r\n fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)\r\n self.assertExportImport(fn.graph, tensors)\r\n output = output_process_fn(fn(*tensors))\r\n script_fn.last_graph = fn.graph_for(*tensors)\r\n return output\r\n return script_fn\r\n\r\n# make a new function where all non-tensor arguments in 'args' have been partially\r\n# applied, and all tensor arguments remain.\r\n# used to trace functions when some arguments are not tensors\r\ndef partial_apply_nontensors(fn, args, **kwargs):\r\n source = ['t' if isinstance(arg, torch.Tensor) else 's' for arg in args]\r\n\r\n def new_fn(*tensors_):\r\n tensors = iter(tensors_)\r\n return fn(*(args[i] if s == 's' else next(tensors) for i, s in enumerate(source)), **kwargs)\r\n\r\n return new_fn, [arg for arg in args if isinstance(arg, torch.Tensor)]\r\n\r\n# create a trace function from input fn\r\ndef create_traced_fn(self, fn):\r\n def traced_fn(*inputs, **kwargs):\r\n fn_tensors, inputs_tensors = partial_apply_nontensors(fn, inputs, **kwargs)\r\n # `check_trace` is set to False because check_trace is run with @no_grad\r\n # Also, `check_against_reference` already does all the checks\r\n # against python function\r\n traced = torch.jit.trace(fn_tensors, inputs_tensors, check_trace=False)\r\n self.assertExportImport(traced.graph, inputs_tensors)\r\n output = traced(*inputs_tensors)\r\n traced_fn.last_graph = traced.graph_for(*inputs_tensors)\r\n return output\r\n return traced_fn\r\n\r\n# known to be failing in script\r\nEXCLUDE_SCRIPT = {\r\n 'test_norm_fro_default',\r\n 'test_norm_fro_cpu',\r\n 'test_norm_nuc',\r\n 'test_norm_fro',\r\n 'test_norm_nuc_batched',\r\n\r\n # aten op has additional cudnn argument\r\n 'test_nn_unfold',\r\n\r\n # flaky test - TODO fix\r\n 'test_nn_ctc_loss',\r\n\r\n # unknown builtin op\r\n 'test_nn_fold',\r\n\r\n # jit doesn't support sparse tensors.\r\n 'test_to_sparse'\r\n}\r\n\r\n# generates a script function and set of example inputs\r\n# from a specified test in the format of nn_functional_tests\r\ndef get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):\r\n test_name = 'test_nn_' + name\r\n\r\n if variant_name != '':\r\n test_name = test_name + '_' + variant_name\r\n\r\n no_grad = variant_name == 'inplace'\r\n\r\n self_variable = create_input((self_size,))[0][0]\r\n kwargs = None\r\n\r\n # need to record this because methods can change the size (e.g. 
unsqueeze)\r\n args_variable, kwargs_variable = create_input(args)\r\n\r\n self_tensor = deepcopy(self_variable.data)\r\n args_tensor = deepcopy(unpack_variables(args_variable))\r\n\r\n f_args_variable = (self_variable,) + args_variable\r\n f_args_tensor = (self_tensor,) + args_tensor\r\n with torch.jit._disable_emit_hooks():\r\n script_fn, inputs = gen_script_fn_and_args(name, \"nn_functional\", *f_args_variable)\r\n return script_fn, inputs\r\n\r\n\r\n# additional modules test\r\n# TODO: delete this list once we make all nn_tests work\r\nadditional_module_tests = [\r\n {\r\n 'module_name': 'Bilinear',\r\n 'constructor_args': (S, S, M),\r\n 'input_size': (S, S),\r\n 'extra_args': ((S, S),)\r\n },\r\n {\r\n 'module_name': 'RNNCell',\r\n 'constructor_args': (S, S),\r\n 'input_size': (S, S),\r\n },\r\n {\r\n 'module_name': 'LSTMCell',\r\n 'constructor_args': (S, S),\r\n 'input_size': (S, S),\r\n },\r\n {\r\n 'module_name': 'GRUCell',\r\n 'constructor_args': (S, S),\r\n 'input_size': (S, S),\r\n },\r\n {\r\n 'module_name': 'MultiheadAttention',\r\n 'constructor_args': (128, 8),\r\n 'input_size': (10, 8, 128),\r\n 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),\r\n 'slowTest': True\r\n },\r\n {\r\n 'module_name': 'Transformer',\r\n 'constructor_args': (1, 1, 1, 1, 2),\r\n 'input_size': (3, 1, 1),\r\n 'extra_args': (torch.randn(1, 1, 1),),\r\n 'slowTest': True\r\n }\r\n]\r\n\r\nEXCLUDE_SCRIPT_MODULES = {\r\n 'test_nn_AdaptiveAvgPool2d_tuple_none',\r\n 'test_nn_AdaptiveAvgPool3d_tuple_none',\r\n 'test_nn_AdaptiveMaxPool2d_tuple_none',\r\n 'test_nn_AdaptiveMaxPool3d_tuple_none',\r\n\r\n # Doesn't use future division, so this is not supported\r\n 'test_nn_CrossMapLRN2d',\r\n}\r\n\r\nscript_method_template = '''\r\ndef forward({}):\r\n return {}\r\n'''\r\n\r\ndef create_script_module(self, nn_module, constructor_args, *args, **kwargs):\r\n def script_module(*args, **kwargs):\r\n formals, tensors, actuals = get_script_args(args)\r\n\r\n method_args = ', '.join(['self'] + actuals)\r\n call_args_str = ', '.join(actuals)\r\n call = \"self.submodule({})\".format(call_args_str)\r\n script = script_method_template.format(method_args, call)\r\n\r\n submodule_constants = []\r\n if kwargs.get('is_constant'):\r\n submodule_constants = ['submodule']\r\n\r\n # Create module to use the script method\r\n class TheModule(torch.jit.ScriptModule):\r\n __constants__ = submodule_constants\r\n\r\n def __init__(self):\r\n super(TheModule, self).__init__()\r\n self.submodule = nn_module(*constructor_args)\r\n\r\n def make_module(script):\r\n module = TheModule()\r\n # check __repr__\r\n str(module)\r\n module.define(script)\r\n return module\r\n\r\n module = make_module(script)\r\n if self:\r\n self.assertExportImportModule(module, tensors)\r\n module(*args)\r\n create_script_module.last_graph = module.graph\r\n return module\r\n return script_module\r\n\r\ndef get_nn_module_name_from_kwargs(**kwargs):\r\n if 'module_name' in kwargs:\r\n return kwargs['module_name']\r\n elif 'fullname' in kwargs:\r\n return kwargs['fullname']\r\n elif 'constructor' in kwargs:\r\n return kwargs['constructor'].__name__\r\n\r\ndef get_nn_mod_test_name(**kwargs):\r\n name = get_nn_module_name_from_kwargs(**kwargs)\r\n test_name = name\r\n if 'desc' in kwargs:\r\n test_name = \"{}_{}\".format(test_name, kwargs['desc'])\r\n return 'test_nn_{}'.format(test_name)\r\n\r\ndef get_nn_module_class_from_kwargs(**kwargs):\r\n name = get_nn_module_name_from_kwargs(**kwargs)\r\n index = name.find(\"_\")\r\n if index == -1:\r\n return 
name\r\n else:\r\n return name[0:name.find(\"_\")]\r\n\r\ndef try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):\r\n name = get_nn_module_name_from_kwargs(**kwargs)\r\n\r\n if 'desc' in kwargs and 'eval' in kwargs['desc']:\r\n # eval() is not supported, so skip these tests\r\n return\r\n\r\n test_name = name\r\n if 'desc' in kwargs:\r\n test_name = \"{}_{}\".format(test_name, kwargs['desc'])\r\n test_name = get_nn_mod_test_name(**kwargs)\r\n\r\n if test_name in EXCLUDE_SCRIPT_MODULES:\r\n return\r\n if 'constructor' in kwargs:\r\n nn_module = kwargs['constructor']\r\n else:\r\n nn_module = getattr(torch.nn, name)\r\n\r\n if \"FunctionalModule\" in str(nn_module):\r\n return\r\n\r\n if 'constructor_args_fn' in kwargs:\r\n constructor_args = kwargs['constructor_args_fn']()\r\n else:\r\n constructor_args = kwargs.get('constructor_args', ())\r\n\r\n # Set up inputs from tuple of sizes or constructor fn\r\n if 'input_fn' in kwargs:\r\n input = kwargs['input_fn']()\r\n else:\r\n input = (kwargs['input_size'],)\r\n\r\n # Extra parameters to forward()\r\n if 'extra_args' in kwargs:\r\n input = input + kwargs['extra_args']\r\n\r\n if 'target_size' in kwargs:\r\n input = input + (kwargs['target_size'],)\r\n elif 'target_fn' in kwargs:\r\n if torch.is_tensor(input):\r\n input = (input,)\r\n input = input + (kwargs['target_fn'](),)\r\n\r\n args_variable, kwargs_variable = create_input(input)\r\n f_args_variable = deepcopy(unpack_variables(args_variable))\r\n out_var = deepcopy(f_args_variable)\r\n\r\n args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)\r\n\r\n return mod, out_var\r\n\r\n\r\ndef get_all_nn_module_tests():\r\n return module_tests + new_module_tests + additional_module_tests\r\n"
] | [
[
"torch.conv_transpose2d",
"torch.conv1d",
"torch.conv_transpose3d",
"torch.conv2d",
"torch.conv3d",
"torch.conv_transpose1d"
],
[
"torch.empty",
"torch.ones",
"torch.Size",
"torch.randint",
"torch.randn",
"torch.jit._disable_emit_hooks",
"torch.rand",
"torch.jit.CompilationUnit",
"torch.tensor",
"torch.full",
"torch.set_default_dtype",
"torch.testing._internal.common_methods_invocations.unpack_variables",
"torch.is_tensor",
"torch.testing._internal.common_methods_invocations.create_input",
"torch.zeros",
"torch.jit.trace"
]
] |
gpspelle/pose-estimation | [
"1dec506ac8abf00616dc0fe76bf476ccdfd6b93e"
] | [
"tf_pose/slim/nets/mobilenet/mobilenet.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Mobilenet Base Class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport collections\nimport contextlib\nimport copy\nimport os\n\nimport tensorflow as tf\n\n\nslim = tf.contrib.slim\n\n\[email protected]_arg_scope\ndef apply_activation(x, name=None, activation_fn=None):\n return activation_fn(x, name=name) if activation_fn else x\n\n\ndef _fixed_padding(inputs, kernel_size, rate=1):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Pads the input such that if it was used in a convolution with 'VALID' padding,\n the output would have the same dimensions as if the unpadded input was used\n in a convolution with 'SAME' padding.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\n rate: An integer, rate for atrous convolution.\n\n Returns:\n output: A tensor of size [batch, height_out, width_out, channels] with the\n input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).\n \"\"\"\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\n kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],\n [pad_beg[1], pad_end[1]], [0, 0]])\n return padded_inputs\n\n\ndef _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\[email protected]\ndef _set_arg_scope_defaults(defaults):\n \"\"\"Sets arg scope defaults for all items present in defaults.\n\n Args:\n defaults: dictionary/list of pairs, containing a mapping from\n function to a dictionary of default args.\n\n Yields:\n context manager where all defaults are set.\n \"\"\"\n if hasattr(defaults, 'items'):\n items = list(defaults.items())\n else:\n items = defaults\n if not items:\n yield\n else:\n func, default_arg = items[0]\n with slim.arg_scope(func, **default_arg):\n with _set_arg_scope_defaults(items[1:]):\n yield\n\n\[email protected]_arg_scope\ndef depth_multiplier(output_params,\n multiplier,\n divisible_by=8,\n min_depth=8,\n **unused_kwargs):\n if 'num_outputs' not in output_params:\n return\n d = output_params['num_outputs']\n output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,\n min_depth)\n\n\n_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])\n\n\ndef op(opfunc, 
**params):\n multiplier = params.pop('multiplier_transorm', depth_multiplier)\n return _Op(opfunc, params=params, multiplier_func=multiplier)\n\n\nclass NoOpScope(object):\n \"\"\"No-op context manager.\"\"\"\n\n def __enter__(self):\n return None\n\n def __exit__(self, exc_type, exc_value, traceback):\n return False\n\n\ndef safe_arg_scope(funcs, **kwargs):\n \"\"\"Returns `slim.arg_scope` with all None arguments removed.\n\n Arguments:\n funcs: Functions to pass to `arg_scope`.\n **kwargs: Arguments to pass to `arg_scope`.\n\n Returns:\n arg_scope or No-op context manager.\n\n Note: can be useful if None value should be interpreted as \"do not overwrite\n this parameter value\".\n \"\"\"\n filtered_args = {name: value for name, value in kwargs.items()\n if value is not None}\n if filtered_args:\n return slim.arg_scope(funcs, **filtered_args)\n else:\n return NoOpScope()\n\n\[email protected]_arg_scope\ndef mobilenet_base( # pylint: disable=invalid-name\n inputs,\n conv_defs,\n multiplier=1.0,\n final_endpoint=None,\n output_stride=None,\n use_explicit_padding=False,\n scope=None,\n is_training=False):\n \"\"\"Mobilenet base network.\n\n Constructs a network from inputs to the given final endpoint. By default\n the network is constructed in inference mode. To create network\n in training mode use:\n\n with slim.arg_scope(mobilenet.training_scope()):\n logits, endpoints = mobilenet_base(...)\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n conv_defs: A list of op(...) layers specifying the net architecture.\n multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n final_endpoint: The name of last layer, for early termination for\n for V1-based networks: last layer is \"layer_14\", for V2: \"layer_20\"\n output_stride: An integer that specifies the requested ratio of input to\n output spatial resolution. If not None, then we invoke atrous convolution\n if necessary to prevent the network from reducing the spatial resolution\n of the activation maps. Allowed values are 1 or any even number, excluding\n zero. Typical values are 8 (accurate fully convolutional mode), 16\n (fast fully convolutional mode), and 32 (classification mode).\n\n NOTE- output_stride relies on all consequent operators to support dilated\n operators via \"rate\" parameter. This might require wrapping non-conv\n operators to operate properly.\n\n use_explicit_padding: Use 'VALID' padding for convolutions, but prepad\n inputs so that the output dimensions are the same as if 'SAME' padding\n were used.\n scope: optional variable scope.\n is_training: How to setup batch_norm and other ops. Note: most of the time\n this does not need be set directly. Use mobilenet.training_scope() to set\n up training instead. This parameter is here for backward compatibility\n only. It is safe to set it to the value matching\n training_scope(is_training=...). It is also safe to explicitly set\n it to False, even if there is outer training_scope set to to training.\n (The network will be built in inference mode). 
If this is set to None,\n no arg_scope is added for slim.batch_norm's is_training parameter.\n\n Returns:\n tensor_out: output tensor.\n end_points: a set of activations for external use, for example summaries or\n losses.\n\n Raises:\n ValueError: multiplier <= 0, or the target output_stride is not\n allowed.\n \"\"\"\n if multiplier <= 0:\n raise ValueError('multiplier is not greater than zero.')\n\n # Set conv defs defaults and overrides.\n conv_defs_defaults = conv_defs.get('defaults', {})\n conv_defs_overrides = conv_defs.get('overrides', {})\n if use_explicit_padding:\n conv_defs_overrides = copy.deepcopy(conv_defs_overrides)\n conv_defs_overrides[\n (slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}\n\n if output_stride is not None:\n if output_stride == 0 or (output_stride > 1 and output_stride % 2):\n raise ValueError('Output stride must be None, 1 or a multiple of 2.')\n\n # a) Set the tensorflow scope\n # b) set padding to default: note we might consider removing this\n # since it is also set by mobilenet_scope\n # c) set all defaults\n # d) set all extra overrides.\n with _scope_all(scope, default_scope='Mobilenet'), \\\n safe_arg_scope([slim.batch_norm], is_training=is_training), \\\n _set_arg_scope_defaults(conv_defs_defaults), \\\n _set_arg_scope_defaults(conv_defs_overrides):\n # The current_stride variable keeps track of the output stride of the\n # activations, i.e., the running product of convolution strides up to the\n # current network layer. This allows us to invoke atrous convolution\n # whenever applying the next convolution would result in the activations\n # having output stride larger than the target output_stride.\n current_stride = 1\n\n # The atrous convolution rate parameter.\n rate = 1\n\n net = inputs\n # Insert default parameters before the base scope which includes\n # any custom overrides set in mobilenet.\n end_points = {}\n scopes = {}\n for i, opdef in enumerate(conv_defs['spec']):\n params = dict(opdef.params)\n opdef.multiplier_func(params, multiplier)\n stride = params.get('stride', 1)\n if output_stride is not None and current_stride == output_stride:\n # If we have reached the target output_stride, then we need to employ\n # atrous convolution with stride=1 and multiply the atrous rate by the\n # current unit's stride for use in subsequent layers.\n layer_stride = 1\n layer_rate = rate\n rate *= stride\n else:\n layer_stride = stride\n layer_rate = 1\n current_stride *= stride\n # Update params.\n params['stride'] = layer_stride\n # Only insert rate into params if rate > 1.\n if layer_rate > 1:\n params['rate'] = layer_rate\n # Set padding\n if use_explicit_padding:\n if 'kernel_size' in params:\n net = _fixed_padding(net, params['kernel_size'], layer_rate)\n else:\n params['use_explicit_padding'] = True\n\n end_point = 'layer_%d' % (i + 1)\n try:\n net = opdef.op(net, **params)\n except Exception:\n print('Failed to create op %i: %r params: %r' % (i, opdef, params))\n raise\n end_points[end_point] = net\n scope = os.path.dirname(net.name)\n scopes[scope] = end_point\n if final_endpoint is not None and end_point == final_endpoint:\n break\n\n # Add all tensors that end with 'output' to\n # endpoints\n for t in net.graph.get_operations():\n scope = os.path.dirname(t.name)\n bn = os.path.basename(t.name)\n if scope in scopes and t.name.endswith('output'):\n end_points[scopes[scope] + '/' + bn] = t.outputs[0]\n return net, end_points\n\n\n@contextlib.contextmanager\ndef _scope_all(scope, default_scope=None):\n with tf.variable_scope(scope, 
default_name=default_scope) as s,\\\n tf.name_scope(s.original_name_scope):\n yield s\n\n\n@slim.add_arg_scope\ndef mobilenet(inputs,\n num_classes=1001,\n prediction_fn=slim.softmax,\n reuse=None,\n scope='Mobilenet',\n base_only=False,\n **mobilenet_args):\n \"\"\"Mobilenet model for classification, supports both V1 and V2.\n\n Note: the default mode is inference; use mobilenet.training_scope to create\n a training network.\n\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n num_classes: number of predicted classes. If 0 or None, the logits layer\n is omitted and the input features to the logits layer (before dropout)\n are returned instead.\n prediction_fn: a function to get predictions out of logits\n (default softmax).\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse, 'scope' must be given.\n scope: Optional variable_scope.\n base_only: if True will only create the base of the network (no pooling\n and no logits).\n **mobilenet_args: passed to mobilenet_base verbatim.\n - conv_defs: list of conv defs\n - multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n - output_stride: will ensure that the last layer has at most this total\n stride. If the architecture calls for more stride than that provided\n (e.g. output_stride=16, but the architecture has 5 stride=2 operators),\n it will replace the extra stride with atrous convolutions.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the network to the corresponding\n activation tensor.\n\n Raises:\n ValueError: Input rank is invalid.\n \"\"\"\n is_training = mobilenet_args.get('is_training', False)\n input_shape = inputs.get_shape().as_list()\n if len(input_shape) != 4:\n raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))\n\n with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:\n inputs = tf.identity(inputs, 'input')\n net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)\n if base_only:\n return net, end_points\n\n net = tf.identity(net, name='embedding')\n\n with tf.variable_scope('Logits'):\n net = global_pool(net)\n end_points['global_pool'] = net\n if not num_classes:\n return net, end_points\n net = slim.dropout(net, scope='Dropout', is_training=is_training)\n # 1 x 1 x num_classes\n # Note: legacy scope name.\n logits = slim.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n biases_initializer=tf.zeros_initializer(),\n scope='Conv2d_1c_1x1')\n\n logits = tf.squeeze(logits, [1, 2])\n\n logits = tf.identity(logits, name='output')\n end_points['Logits'] = logits\n if prediction_fn:\n end_points['Predictions'] = prediction_fn(logits, 'Predictions')\n return logits, end_points\n\n\ndef global_pool(input_tensor, pool_op=tf.nn.avg_pool):\n \"\"\"Applies avg pool to produce 1x1 output.\n\n NOTE: This function is functionally equivalent to reduce_mean, but it has\n a baked-in average pool, which has better support across hardware.\n\n Args:\n input_tensor: input tensor\n pool_op: pooling op (avg pool is default)\n Returns:\n a tensor batch_size x 1 x 1 x depth.\n \"\"\"\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size = 
tf.convert_to_tensor(\n [1, tf.shape(input_tensor)[1],\n tf.shape(input_tensor)[2], 1])\n else:\n kernel_size = [1, shape[1], shape[2], 1]\n output = pool_op(\n input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')\n # Recover output shape, for unknown shape.\n output.set_shape([None, 1, 1, None])\n return output\n\n\ndef training_scope(is_training=True,\n weight_decay=0.00004,\n stddev=0.09,\n dropout_keep_prob=0.8,\n bn_decay=0.997):\n \"\"\"Defines Mobilenet training scope.\n\n Usage:\n with tf.contrib.slim.arg_scope(mobilenet.training_scope()):\n logits, endpoints = mobilenet_v2.mobilenet(input_tensor)\n\n # the network created will be trainable with dropout/batch norm\n # initialized appropriately.\n Args:\n is_training: if set to False this will ensure that all customizations are\n set to non-training mode. This might be helpful for code that is reused\n across both training/evaluation, but most of the time training_scope with\n value False is not needed. If this is set to None, the parameter is not\n added to the batch_norm arg_scope.\n\n weight_decay: The weight decay to use for regularizing the model.\n stddev: Standard deviation for initialization; if negative, Xavier\n initialization is used.\n dropout_keep_prob: dropout keep probability (not set if equal to None).\n bn_decay: decay for the batch norm moving averages (not set if equal to\n None).\n\n Returns:\n An argument scope to use via arg_scope.\n \"\"\"\n # Note: do not introduce parameters that would change the inference\n # model here (for example whether to use bias), modify conv_def instead.\n batch_norm_params = {\n 'decay': bn_decay,\n 'is_training': is_training\n }\n if stddev < 0:\n weight_initializer = slim.initializers.xavier_initializer()\n else:\n weight_initializer = tf.truncated_normal_initializer(stddev=stddev)\n\n # Set weight_decay for weights in Conv and FC layers.\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected, slim.separable_conv2d],\n weights_initializer=weight_initializer,\n normalizer_fn=slim.batch_norm), \\\n slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\\\n safe_arg_scope([slim.batch_norm], **batch_norm_params), \\\n safe_arg_scope([slim.dropout], is_training=is_training,\n keep_prob=dropout_keep_prob), \\\n slim.arg_scope([slim.conv2d], \\\n weights_regularizer=slim.l2_regularizer(weight_decay)), \\\n slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:\n return s\n"
] | [
[
"tensorflow.pad",
"tensorflow.shape",
"tensorflow.truncated_normal_initializer",
"tensorflow.zeros_initializer",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.identity"
]
] |
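The depth-multiplier machinery in the mobilenet row above reduces to one rule in _make_divisible: scale the channel count, snap it to a multiple of the divisor, and bump it back up if the snap removed more than 10% of the requested width. A minimal standalone sketch (plain Python, without the slim decorator; the 0.35 multiplier and channel widths are illustrative, not taken from any conv def):

def make_divisible(v, divisor=8, min_value=None):
    # Snap v to the nearest multiple of divisor, never below min_value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Never lose more than 10% of the requested width to rounding.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

# How depth_multiplier() rewrites 'num_outputs' in each op's params:
for channels in (32, 64, 96, 160, 320):
    params = {'num_outputs': channels}
    params['num_outputs'] = make_divisible(params['num_outputs'] * 0.35,
                                           divisor=8, min_value=8)
    print(channels, '->', params['num_outputs'])
# 32 -> 16, 64 -> 24, 96 -> 32, 160 -> 56, 320 -> 112

Keeping every layer width a multiple of 8 is a hardware-friendliness choice; the 10% guard is what stops a small request such as 32 * 0.35 = 11.2 channels from collapsing all the way down to 8.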
tao2020/Horizon | [
"0f9a1b16ddd6e5a8ac98e61acd227aae7c201b57"
] | [
"ml/rl/workflow/dqn_workflow.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nimport sys\nfrom typing import Dict\n\nimport numpy as np\nfrom ml.rl.evaluation.evaluator import Evaluator\nfrom ml.rl.preprocessing.preprocessor import Preprocessor\nfrom ml.rl.preprocessing.sparse_to_dense import PandasSparseToDenseProcessor\nfrom ml.rl.readers.json_dataset_reader import JSONDatasetReader\nfrom ml.rl.tensorboardX import summary_writer_context\nfrom ml.rl.thrift.core.ttypes import (\n DiscreteActionModelParameters,\n NormalizationParameters,\n RainbowDQNParameters,\n RLParameters,\n TrainingParameters,\n)\nfrom ml.rl.training.dqn_trainer import DQNTrainer\nfrom ml.rl.workflow.base_workflow import BaseWorkflow\nfrom ml.rl.workflow.helpers import (\n export_trainer_and_predictor,\n minibatch_size_multiplier,\n parse_args,\n update_model_for_warm_start,\n)\nfrom ml.rl.workflow.preprocess_handler import DqnPreprocessHandler, PreprocessHandler\nfrom tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DqnWorkflow(BaseWorkflow):\n def __init__(\n self,\n model_params: DiscreteActionModelParameters,\n preprocess_handler: PreprocessHandler,\n state_normalization: Dict[int, NormalizationParameters],\n use_gpu: bool,\n use_all_avail_gpus: bool,\n ):\n logger.info(\"Running DQN workflow with params:\")\n logger.info(model_params)\n model_params = model_params\n\n trainer = DQNTrainer(\n model_params,\n state_normalization,\n use_gpu=use_gpu,\n use_all_avail_gpus=use_all_avail_gpus,\n )\n trainer = update_model_for_warm_start(trainer)\n assert type(trainer) == DQNTrainer, \"Warm started wrong model type: \" + str(\n type(trainer)\n )\n\n evaluator = Evaluator(\n model_params.actions,\n model_params.rl.gamma,\n trainer,\n metrics_to_score=trainer.metrics_to_score,\n )\n\n super(DqnWorkflow, self).__init__(\n preprocess_handler, trainer, evaluator, model_params.training.minibatch_size\n )\n\n\ndef main(params):\n # Set minibatch size based on # of devices being used to train\n params[\"training\"][\"minibatch_size\"] *= minibatch_size_multiplier(\n params[\"use_gpu\"], params[\"use_all_avail_gpus\"]\n )\n\n rl_parameters = RLParameters(**params[\"rl\"])\n training_parameters = TrainingParameters(**params[\"training\"])\n rainbow_parameters = RainbowDQNParameters(**params[\"rainbow\"])\n\n model_params = DiscreteActionModelParameters(\n actions=params[\"actions\"],\n rl=rl_parameters,\n training=training_parameters,\n rainbow=rainbow_parameters,\n )\n state_normalization = BaseWorkflow.read_norm_file(params[\"state_norm_data_path\"])\n\n writer = SummaryWriter(log_dir=params[\"model_output_path\"])\n logger.info(\"TensorBoard logging location is: {}\".format(writer.log_dir))\n\n preprocess_handler = DqnPreprocessHandler(\n Preprocessor(state_normalization, False),\n np.array(model_params.actions),\n PandasSparseToDenseProcessor(),\n )\n\n workflow = DqnWorkflow(\n model_params,\n preprocess_handler,\n state_normalization,\n params[\"use_gpu\"],\n params[\"use_all_avail_gpus\"],\n )\n\n train_dataset = JSONDatasetReader(\n params[\"training_data_path\"], batch_size=training_parameters.minibatch_size\n )\n eval_dataset = JSONDatasetReader(params[\"eval_data_path\"], batch_size=16)\n\n with summary_writer_context(writer):\n workflow.train_network(train_dataset, eval_dataset, int(params[\"epochs\"]))\n return export_trainer_and_predictor(\n workflow.trainer, params[\"model_output_path\"]\n ) # noqa\n\n\nif __name__ == \"__main__\":\n 
logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n params = parse_args(sys.argv)\n\n main(params)\n"
] | [
[
"numpy.array"
]
] |
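Before training, the dqn_workflow row scales the configured minibatch size by minibatch_size_multiplier so that each device still sees the intended per-device batch. The helper itself is not shown in this row, so the following is a hypothetical reimplementation of the obvious version, assuming PyTorch device discovery (Horizon is PyTorch-based); the real helper in ml.rl.workflow.helpers may differ:

import torch

def minibatch_size_multiplier(use_gpu, use_all_avail_gpus):
    # Hypothetical sketch: one multiplier per device that shares the batch.
    if use_gpu and use_all_avail_gpus:
        return max(1, torch.cuda.device_count())
    return 1

# Mirrors the first lines of main(): scale before building TrainingParameters.
params = {'training': {'minibatch_size': 1024},
          'use_gpu': True, 'use_all_avail_gpus': True}
params['training']['minibatch_size'] *= minibatch_size_multiplier(
    params['use_gpu'], params['use_all_avail_gpus'])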
yphsieh/rPPG_blink | [
"31be5b818d34892eb9f2c1abd3b00f370413e3db"
] | [
"evaluation.py"
] | [
"import os\nimport argparse\nfrom keras.models import load_model\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, f1_score\n\nfrom data_preprocessing import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-m', '--model_name', default='save/RDNN.h5', type=str)\nparser.add_argument('--smooth', type=bool, default=False)\nparser.add_argument('--scale', type=bool, default=False)\nargs = parser.parse_args()\nprint(args)\n\n\nx_test = np.load('data/data_test_600.npy')\ny_test = np.load('data/label_test_600.npy').reshape(-1, 1)\nprint('x_test: {}'.format(x_test.shape))\nprint('y_test: {}'.format(y_test.shape))\n\nlie_ratio = np.sum(y_test)/y_test.shape[0]\nprint('Lie Ratio: {}'.format(lie_ratio))\n\nx_test = TestPreprocess(x_test, args.smooth, args.scale)\n\nprint('='*20, 'Model Loading...', '='*20)\nmodel = load_model(args.model_name)\nprint('='*20, 'Model Loaded', '='*20)\n\n# os.system('clear')\n\npredict = model.predict(x_test)\ny_predict = (predict > 0.3).astype(np.int)\n\nlie_ratio = np.sum(y_predict)/y_predict.shape[0]\nprint('Lie Ratio Predicted: {}'.format(lie_ratio))\n\n\nscore_f1 = f1_score(y_test, y_predict)\nscore_acc = accuracy_score(y_test, y_predict)\nprint('f1 score: {}'.format(score_f1))\nprint('accuracy score: {}'.format(score_acc))\n"
] | [
[
"sklearn.metrics.f1_score",
"numpy.load",
"numpy.sum",
"sklearn.metrics.accuracy_score"
]
] |
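The evaluation script above binarizes the model's sigmoid outputs at a fixed 0.3 cut-off rather than the conventional 0.5, trading precision for recall on the minority "lie" class. A self-contained sketch of how the cut-off moves F1 and accuracy, with synthetic probabilities standing in for model.predict (all numbers illustrative):

import numpy as np
from sklearn.metrics import accuracy_score, f1_score

rng = np.random.default_rng(0)
# Fake an imbalanced test set (~30% positives) and noisy scores for it.
y_true = (rng.random(200) < 0.3).astype(int)
probs = np.clip(0.35 * y_true + rng.normal(0.3, 0.15, 200), 0.0, 1.0)

for threshold in (0.3, 0.5, 0.7):
    y_pred = (probs > threshold).astype(int)  # same rule as the script
    print(threshold,
          round(f1_score(y_true, y_pred), 3),
          round(accuracy_score(y_true, y_pred), 3))

Lowering the threshold raises recall at the cost of false positives; on a skewed label distribution that usually helps F1 even when accuracy moves the other way.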
decarlof/algotom | [
"3dce086bcc0c4df97700c60f8ec90e07ee95d040"
] | [
"tests/test_util/test_calibration.py"
] | [
"# ============================================================================\n# ============================================================================\n# Copyright (c) 2021 Nghia T. Vo. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Author: Nghia T. Vo\n# E-mail: [email protected]\n# Description: Tests for the Algotom package.\n# Contributors:\n# ============================================================================\n\"\"\"\nTests for methods in util/calibration.py\n\"\"\"\n\nimport unittest\nimport numpy as np\nimport scipy.ndimage as ndi\nimport algotom.util.calibration as cali\n\n\nclass CalibrationMethods(unittest.TestCase):\n\n def setUp(self):\n self.eps = 10 ** (-6)\n self.var = 0.05\n sigma = 30\n (self.hei, self.wid) = (64, 64)\n (ycen, xcen) = (self.hei // 2, self.wid // 2)\n y, x = np.ogrid[-ycen:self.hei - ycen, -xcen:self.wid - xcen]\n num = 2.0 * sigma * sigma\n self.bck = np.exp(-(x * x / num + y * y / num))\n mat = np.zeros((self.hei, self.wid), dtype=np.float32)\n self.num_dots = 1\n mat[ycen - 3:ycen + 3, xcen - 3:xcen + 3] = 1\n self.mat_dots = np.float32(ndi.binary_dilation(mat, iterations=2))\n\n def test_normalize_background(self):\n mat_nor = cali.normalize_background(self.bck, 3)\n std_val = np.std(mat_nor)\n self.assertTrue(std_val <= self.var)\n\n def test_normalize_background_based_fft(self):\n mat_nor = cali.normalize_background_based_fft(self.bck, sigma=5, pad=10)\n std_val = np.std(mat_nor)\n self.assertTrue(std_val <= self.var)\n\n def test_binarize_image(self):\n bck = 0.5 * np.random.rand(self.hei, self.wid)\n mat_bin = cali.binarize_image(self.mat_dots + bck, bgr=\"dark\",\n denoise=False)\n num_dots = ndi.label(mat_bin)[-1]\n self.assertTrue(self.num_dots == num_dots)\n\n def test_calculate_distance(self):\n mat1 = np.zeros((self.hei, self.wid), dtype=np.float32)\n mat2 = np.zeros_like(mat1)\n bck = 0.5 * np.random.rand(self.hei, self.wid)\n mat1[5, 10] = 1.0\n mat1 = np.float32(ndi.binary_dilation(mat1, iterations=3))\n mat2[5, 20] = 1.0\n mat2 = np.float32(ndi.binary_dilation(mat2, iterations=3))\n dis = cali.calculate_distance(mat1 + bck, mat2 + bck, bgr=\"dark\",\n denoise=False)\n self.assertTrue(np.abs(dis - 10.0) <= self.eps)\n"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"scipy.ndimage.label",
"scipy.ndimage.binary_dilation",
"numpy.abs",
"numpy.exp",
"numpy.random.rand",
"numpy.std"
]
] |
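The calibration tests above build their fixture by dilating a seeded square with scipy.ndimage.binary_dilation and then verify detection by counting connected components with ndi.label. A condensed sketch of the same pattern; the fixed 0.75 threshold is a hypothetical stand-in for cali.binarize_image, which additionally handles denoising and bright/dark backgrounds:

import numpy as np
import scipy.ndimage as ndi

# Seed a 6x6 square and grow it into a single dot, as the test setUp does.
mat = np.zeros((64, 64), dtype=np.float32)
mat[29:35, 29:35] = 1.0
dots = np.float32(ndi.binary_dilation(mat, iterations=2))
noisy = dots + 0.5 * np.random.rand(64, 64)  # uniform background in [0, 0.5)

# Background stays below 0.5 and the dot never falls below 1.0, so any
# threshold between the two separates them deterministically.
binary = noisy > 0.75
num_dots = ndi.label(binary)[-1]
print(num_dots)  # 1: one connected component, matching test_binarize_image

Once each dot is isolated as a single labeled component, a center and hence a dot-to-dot distance can be measured, which is what the calculate_distance test checks: dots seeded at x=10 and x=20 should come out 10 pixels apart.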
thompsonsed/pycoalescence | [
"eddce52ad7b3584e1fb208532d6851751b27dd4a"
] | [
"pycoalescence/tests/test_coalescence_tree.py"
] | [
"\"\"\"\nTests the coalescence tree object.\n\"\"\"\nimport os\nimport random\nimport shutil\nimport sqlite3\nimport sys\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\nfrom setup_tests import setUpAll, tearDownAll, skipLongTest\n\nfrom pycoalescence import Simulation\nfrom pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description\nfrom pycoalescence.sqlite_connection import check_sql_table_exist\n\n\ndef setUpModule():\n \"\"\"\n Creates the output directory and moves logging files\n \"\"\"\n setUpAll()\n t = CoalescenceTree(\"sample/sample.db\")\n t.clear_calculations()\n\n\ndef tearDownModule():\n \"\"\"\n Removes the output directory\n \"\"\"\n tearDownAll()\n\n\nclass TestNullSimulationErrors(unittest.TestCase):\n \"\"\"\n Tests that simulations that are not linked raise the correct error.\n \"\"\"\n\n def testRaisesError(self):\n \"\"\"\n Tests that a null simulation will raise an error when any operation is performed.\n \"\"\"\n t = CoalescenceTree()\n with self.assertRaises(RuntimeError):\n t.get_species_richness()\n with self.assertRaises(RuntimeError):\n t.calculate_fragment_richness()\n with self.assertRaises(RuntimeError):\n t.calculate_alpha_diversity()\n with self.assertRaises(RuntimeError):\n t.calculate_beta_diversity()\n with self.assertRaises(RuntimeError):\n t.calculate_fragment_abundances()\n with self.assertRaises(RuntimeError):\n t.calculate_fragment_octaves()\n with self.assertRaises(RuntimeError):\n t.calculate_octaves()\n with self.assertRaises(RuntimeError):\n t.get_fragment_list()\n with self.assertRaises(RuntimeError):\n t.get_alpha_diversity()\n with self.assertRaises(RuntimeError):\n t.get_beta_diversity()\n with self.assertRaises(RuntimeError):\n t.get_community_references()\n with self.assertRaises(RuntimeError):\n t.get_metacommunity_references()\n with self.assertRaises(RuntimeError):\n t.get_species_locations()\n with self.assertRaises(RuntimeError):\n t.get_species_abundances()\n with self.assertRaises(RuntimeError):\n t.get_species_list()\n with self.assertRaises(RuntimeError):\n _ = t.get_simulation_parameters()\n with self.assertRaises(RuntimeError):\n t.get_fragment_abundances(\"null\", 1)\n with self.assertRaises(RuntimeError):\n t.get_species_richness()\n with self.assertRaises(RuntimeError):\n t.get_octaves(1)\n\n\nclass TestParameterDescriptions(unittest.TestCase):\n \"\"\"\n Tests that program correctly reads from the parameter_descriptions.json dictionary.\n \"\"\"\n\n def testReadsCorrectly(self):\n \"\"\"\n Tests that the dictionary is read correctly.\n \"\"\"\n tmp_dict = {\n \"habitat_change_rate\": \"the rate of change from present density maps to historic density maps\",\n \"sample_file\": \"the sample area map for spatially selective sampling. 
Can be null to sample all \" \"cells\",\n \"sample_x\": \"the sample map x dimension\",\n \"sample_y\": \"the sample map y dimension\",\n \"sample_x_offset\": \"the sample x map offset from the grid\",\n \"sample_y_offset\": \"the sample y map offset from the grid\",\n \"output_dir\": \"the output directory for the simulation database\",\n \"seed\": \"the random seed to start the simulation, for repeatability\",\n \"coarse_map_x\": \"the coarse density map x dimension\",\n \"fine_map_file\": \"the density map file location at the finer resolution, covering a smaller area\",\n \"tau\": \"the tau dispersal value for fat-tailed dispersal\",\n \"grid_y\": \"the simulated grid y dimension\",\n \"dispersal_relative_cost\": \"the relative rate of moving through non-habitat compared to habitat\",\n \"fine_map_y_offset\": \"the number of cells the fine map is offset from the sample map in the y \"\n \"dimension, at the fine resolution\",\n \"gen_since_historical\": \"the number of generations that occur before the historical, or historic,\"\n \" state is reached\",\n \"dispersal_method\": \"the dispersal method used. Can be one of 'normal', 'norm-uniform' or \" \"'fat-tail'.\",\n \"historical_fine_map\": \"the historical, or historic, coarse density map file location\",\n \"coarse_map_scale\": \"the scale of the coarse density map compared to the fine density map. 1 \"\n \"means equal density\",\n \"grid_x\": \"the simulated grid x dimension\",\n \"coarse_map_file\": \"the density map file location at the coarser resolution, covering a larger \" \"area\",\n \"min_num_species\": \"the minimum number of species known to exist (currently has no effect)\",\n \"historical_coarse_map\": \"the historical, or historic, coarse density map file location\",\n \"m_probability\": \"the probability of choosing from the uniform dispersal kernel in normal-uniform\"\n \" dispersal\",\n \"sigma\": \"the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals\",\n \"deme\": \"the number of individuals inhabiting a cell at a map density of 1\",\n \"time_config_file\": \"will be 'set' if temporal sampling is used, 'null' otherwise\",\n \"coarse_map_y\": \"the coarse density map y dimension\",\n \"fine_map_x\": \"the fine density map x dimension\",\n \"coarse_map_y_offset\": \"the number of cells the coarse map is offset from the fine map in the y \"\n \"dimension, at the fine resolution\",\n \"cutoff\": \"the maximal dispersal distance possible, for normal-uniform dispersal\",\n \"fine_map_y\": \"the fine density map y dimension\",\n \"sample_size\": \"the proportion of individuals to sample from each cell (0-1)\",\n \"fine_map_x_offset\": \"the number of cells the fine map is offset from the sample map in the x \"\n \"dimension, at the fine resolution\",\n \"speciation_rate\": \"the minimum speciation rate the simulation was run with\",\n \"task\": \"the job or task reference number given to this simulation\",\n \"coarse_map_x_offset\": \"the number of cells the coarse map is offset from the fine map in the x \"\n \"dimension, at the fine resolution\",\n \"landscape_type\": \"if false, landscapes have hard boundaries. 
Otherwise, can be infinite, \"\n \"with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled \"\n \"maps\",\n \"max_time\": \"the maximum simulation time to run for (in seconds)\",\n \"sim_complete\": \"set to true upon simulation completion, false for incomplete simulations\",\n \"protracted\": \"if true, the simulation was run with protracted speciation.\",\n \"min_speciation_gen\": \"the minimum number of generations required before speciation can occur\",\n \"max_speciation_gen\": \"the maximum number of generations a lineage can exist before it is \" \"speciated\",\n \"dispersal_map\": \"a tif file where rows represent cumulative dispersal probability to every other \"\n \"cell, using the row number = x + (y * x_max)\",\n }\n t = CoalescenceTree(\"sample/sample.db\")\n sim_output = t.get_simulation_parameters()\n for key in sim_output.keys():\n self.assertIn(key, get_parameter_description().keys())\n self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))\n for key in get_parameter_description().keys():\n self.assertIn(key, sim_output.keys())\n for key in tmp_dict.keys():\n self.assertEqual(tmp_dict[key], get_parameter_description(key))\n self.assertDictEqual(tmp_dict, get_parameter_description())\n with self.assertRaises(KeyError):\n get_parameter_description(key=\"notakey\")\n dispersal_parameters = t.dispersal_parameters()\n expected_disp_dict = {\n \"dispersal_method\": \"normal\",\n \"sigma\": 3.55,\n \"tau\": 0.470149,\n \"m_probability\": 0,\n \"cutoff\": 0,\n }\n for key in dispersal_parameters.keys():\n self.assertIn(key, tmp_dict.keys())\n self.assertIn(key, expected_disp_dict.keys())\n for key, val in expected_disp_dict.items():\n self.assertIn(key, dispersal_parameters.keys())\n if isinstance(val, float):\n self.assertAlmostEqual(val, dispersal_parameters[key])\n else:\n self.assertEqual(val, dispersal_parameters[key])\n\n\nclass TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):\n \"\"\"Tests that the correct errors are raised when speciation parameters are supplied incorrectly.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Generates the temporary databases to attempt analysis on.\"\"\"\n src = [os.path.join(\"sample\", \"sample{}.db\".format(x)) for x in [2, 3]]\n cls.dst = [os.path.join(\"output\", \"sample{}.db\".format(x)) for x in [2, 3]]\n for tmp_src, tmp_dst in zip(src, cls.dst):\n if os.path.exists(tmp_dst):\n os.remove(tmp_dst)\n shutil.copy(tmp_src, tmp_dst)\n\n def testSetSpeciationRates(self):\n \"\"\"Tests setting speciation rates works as intended and raises appropriate errors\"\"\"\n ct = CoalescenceTree(self.dst[0])\n for attempt in [\"a string\", [\"a\", \"string\"], [[\"list\", \"list2\"], 0.2, 0.1], [None]]:\n with self.assertRaises(TypeError):\n ct._set_speciation_rates(attempt)\n with self.assertRaises(RuntimeError):\n ct._set_speciation_rates(None)\n for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:\n with self.assertRaises(ValueError):\n ct._set_speciation_rates(attempt)\n expected_list = [0.1, 0.2, 0.3]\n ct._set_speciation_rates(expected_list)\n self.assertEqual(expected_list, ct.applied_speciation_rates_list)\n ct._set_speciation_rates(0.2)\n self.assertEqual([0.2], ct.applied_speciation_rates_list)\n\n def testSetRecordFragments(self):\n \"\"\"Tests that setting the record_fragments flag works as expected.\"\"\"\n ct = CoalescenceTree(self.dst[0])\n ct._set_record_fragments(True)\n self.assertEqual(\"null\", ct.record_fragments)\n 
ct._set_record_fragments(False)\n self.assertEqual(\"F\", ct.record_fragments)\n for each in [\"PlotBiodiversityMetrics.db\", \"doesntexist.csv\"]:\n config_path = os.path.join(\"sample\", each)\n with self.assertRaises(IOError):\n ct._set_record_fragments(config_path)\n expected = os.path.join(\"sample\", \"FragmentsTest.csv\")\n ct._set_record_fragments(expected)\n self.assertEqual(expected, ct.record_fragments)\n\n def testSetRecordSpatial(self):\n \"\"\"Tests that the setting the record_spatial flag works as expected\"\"\"\n ct = CoalescenceTree(self.dst[0])\n ct._set_record_spatial(\"T\")\n self.assertTrue(ct.record_spatial)\n ct._set_record_spatial(\"F\")\n self.assertFalse(ct.record_spatial)\n with self.assertRaises(TypeError):\n ct._set_record_spatial(\"nota bool\")\n ct._set_record_spatial(True)\n self.assertTrue(ct.record_spatial)\n\n def testSetMetacommunityParameters(self):\n \"\"\"Tests that setting the metacommunity parameters works as expected.\"\"\"\n ct = CoalescenceTree(self.dst[0])\n for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:\n with self.assertRaises(ValueError):\n ct.fragments = \"F\"\n ct._set_record_fragments(False)\n ct._set_record_spatial(False)\n ct.times = [0.0]\n ct._set_metacommunity_parameters(size, spec)\n ct._set_metacommunity_parameters()\n self.assertEqual(0.0, ct.metacommunity_size)\n self.assertEqual(0.0, ct.metacommunity_speciation_rate)\n ct._set_metacommunity_parameters(10, 0.1, \"simulated\")\n self.assertEqual(10, ct.metacommunity_size)\n self.assertEqual(0.1, ct.metacommunity_speciation_rate)\n\n def testSetProtractedParameters(self):\n \"\"\"Tests that setting the protracted parameters works as expected.\"\"\"\n ct = CoalescenceTree(self.dst[0])\n with self.assertRaises(ValueError):\n ct._set_protracted_parameters(0.1, 100)\n ct = CoalescenceTree(self.dst[1])\n ct._set_protracted_parameters(10, 100)\n self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])\n ct.protracted_parameters = []\n for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:\n with self.assertRaises(ValueError):\n ct._check_protracted_parameters(min_proc, max_proc)\n with self.assertRaises(ValueError):\n ct._set_protracted_parameters(min_proc, max_proc)\n with self.assertRaises(ValueError):\n ct.add_protracted_parameters(min_proc, max_proc)\n ct._set_protracted_parameters(50, 5000)\n self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])\n ct.protracted_parameters = []\n ct._set_protracted_parameters()\n self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])\n\n def testSetSampleFile(self):\n \"\"\"Tests that the sample file is correctly set.\"\"\"\n ct = CoalescenceTree(self.dst[0])\n for file in [\"notafile.tif\", os.path.join(\"sample\", \"sample.db\")]:\n with self.assertRaises(IOError):\n ct._set_sample_file(file)\n ct._set_sample_file()\n self.assertEqual(\"null\", ct.sample_file)\n expected_file = os.path.join(\"sample\", \"SA_sample_coarse.tif\")\n ct._set_sample_file(expected_file)\n self.assertEqual(expected_file, ct.sample_file)\n\n def testSetTimes(self):\n \"\"\"Tests that times are correctly set.\"\"\"\n ct = CoalescenceTree(self.dst[0])\n ct._set_times(None)\n self.assertEqual(0.0, ct.times[0])\n with self.assertRaises(TypeError):\n ct.add_times(0.5)\n with self.assertRaises(TypeError):\n ct.add_times([0.2, 0.5, \"string\"])\n ct.times = None\n ct.add_times([0.2, 0.5, 10])\n self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)\n ct.times = None\n ct._set_times(0.2)\n self.assertEqual([0.0, 0.2], ct.times)\n ct.times = None\n 
ct._set_times([0.1, 0.5, 10.0])\n self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)\n\n\nclass TestCoalescenceTreeParameters(unittest.TestCase):\n \"\"\"Tests that parameters are correctly obtained from the databases and the relevant errors are raised.\"\"\"\n\n def testCommunityParameters1(self):\n \"\"\"Tests the community parameters make sense in a very simple community.\"\"\"\n shutil.copyfile(os.path.join(\"sample\", \"sample3.db\"), os.path.join(\"output\", \"temp_sample3.db\"))\n t = CoalescenceTree(os.path.join(\"output\", \"temp_sample3.db\"), logging_level=50)\n self.assertEqual([], t.get_metacommunity_references())\n self.assertEqual([1], t.get_community_references())\n params = t.get_community_parameters(1)\n expected_dict = {\n \"speciation_rate\": 0.001,\n \"time\": 0.0,\n \"fragments\": 0,\n \"metacommunity_reference\": 0,\n \"min_speciation_gen\": 100.0,\n \"max_speciation_gen\": 10000.0,\n }\n self.assertEqual(expected_dict, params)\n with self.assertRaises(sqlite3.Error):\n t.get_metacommunity_parameters(1)\n with self.assertRaises(KeyError):\n t.get_community_parameters(2)\n with self.assertRaises(KeyError):\n t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)\n with self.assertRaises(KeyError):\n _ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)\n ref = t.get_community_reference(\n speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0\n )\n self.assertEqual(1, ref)\n self.assertEqual(expected_dict, t.get_community_parameters(ref))\n t.wipe_data()\n with self.assertRaises(IOError):\n t.get_community_parameters_pd()\n\n def testCommunityParameters2(self):\n \"\"\"Tests the community parameters make sense in a very simple community.\"\"\"\n t = CoalescenceTree(os.path.join(\"sample\", \"sample4.db\"))\n self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())\n expected_params1 = {\"speciation_rate\": 0.1, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 0}\n expected_params2 = {\"speciation_rate\": 0.1, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 1}\n expected_params3 = {\"speciation_rate\": 0.2, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 1}\n expected_params4 = {\"speciation_rate\": 0.1, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 2}\n expected_params5 = {\"speciation_rate\": 0.2, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 2}\n expected_meta_params1 = {\n \"speciation_rate\": 0.001,\n \"metacommunity_size\": 10000.0,\n \"option\": \"simulated\",\n \"external_reference\": 0,\n }\n expected_meta_params2 = {\n \"speciation_rate\": 0.001,\n \"metacommunity_size\": 10000.0,\n \"option\": \"analytical\",\n \"external_reference\": 0,\n }\n\n params1 = t.get_community_parameters(1)\n params2 = t.get_community_parameters(2)\n params3 = t.get_community_parameters(3)\n params4 = t.get_community_parameters(4)\n params5 = t.get_community_parameters(5)\n params6 = t.get_metacommunity_parameters(1)\n params7 = t.get_metacommunity_parameters(2)\n self.assertEqual([1, 2], t.get_metacommunity_references())\n self.assertEqual(expected_params1, params1)\n self.assertEqual(expected_params2, params2)\n self.assertEqual(expected_params3, params3)\n self.assertEqual(expected_params4, params4)\n self.assertEqual(expected_params5, params5)\n self.assertEqual(expected_meta_params1, params6)\n self.assertEqual(expected_meta_params2, params7)\n with self.assertRaises(KeyError):\n 
t.get_community_parameters(6)\n with self.assertRaises(KeyError):\n t.get_metacommunity_parameters(3)\n ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)\n with self.assertRaises(KeyError):\n t.get_community_reference(\n speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0\n )\n ref2 = t.get_community_reference(\n speciation_rate=0.1,\n time=0.0,\n fragments=False,\n metacommunity_size=10000.0,\n metacommunity_speciation_rate=0.001,\n metacommunity_option=\"simulated\",\n )\n with self.assertRaises(KeyError):\n t.get_community_reference(\n speciation_rate=0.1,\n time=0.0,\n fragments=False,\n metacommunity_size=10000.0,\n metacommunity_speciation_rate=0.01,\n metacommunity_option=\"simulated\",\n )\n ref3 = t.get_community_reference(\n speciation_rate=0.2,\n time=0.0,\n fragments=False,\n metacommunity_size=10000.0,\n metacommunity_speciation_rate=0.001,\n metacommunity_option=\"simulated\",\n )\n ref4 = t.get_community_reference(\n speciation_rate=0.1,\n time=0.0,\n fragments=False,\n metacommunity_size=10000.0,\n metacommunity_speciation_rate=0.001,\n metacommunity_option=\"analytical\",\n )\n ref5 = t.get_community_reference(\n speciation_rate=0.2,\n time=0.0,\n fragments=False,\n metacommunity_size=10000.0,\n metacommunity_speciation_rate=0.001,\n metacommunity_option=\"analytical\",\n )\n self.assertEqual(1, ref1)\n self.assertEqual(2, ref2)\n self.assertEqual(3, ref3)\n self.assertEqual(4, ref4)\n self.assertEqual(5, ref5)\n expected_community_params_list = []\n for reference in t.get_community_references():\n params = t.get_community_parameters(reference)\n params[\"reference\"] = reference\n expected_community_params_list.append(params)\n expected_community_params = pd.DataFrame(expected_community_params_list)\n actual_output = t.get_community_parameters_pd()\n assert_frame_equal(expected_community_params, actual_output, check_like=True)\n\n def testIsComplete(self):\n \"\"\"Tests sims are correctly identified as complete.\"\"\"\n t = CoalescenceTree(os.path.join(\"sample\", \"sample4.db\"))\n self.assertTrue(t.is_complete)\n\n\nclass TestCoalescenceTreeAnalysis(unittest.TestCase):\n \"\"\"Tests analysis is performed correctly\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Sets up the Coalescence object test case.\"\"\"\n dst1 = os.path.join(\"output\", \"sampledb0.db\")\n for i in range(0, 11):\n dst = os.path.join(\"output\", \"sampledb{}.db\".format(i))\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copyfile(os.path.join(\"sample\", \"sample.db\"), dst)\n shutil.copyfile(os.path.join(\"sample\", \"nse_reference.db\"), os.path.join(\"output\", \"nse_reference1.db\"))\n random.seed(2)\n cls.test = CoalescenceTree(dst1, logging_level=50)\n cls.test.clear_calculations()\n cls.test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n cls.test.calculate_fragment_richness()\n cls.test.calculate_fragment_octaves()\n cls.test.calculate_octaves_error()\n cls.test.calculate_alpha_diversity()\n cls.test.calculate_beta_diversity()\n cls.test2 = CoalescenceTree()\n cls.test2.set_database(os.path.join(\"sample\", \"sample_nofrag.db\"))\n dstx = os.path.join(\"output\", \"sampledbx.db\")\n shutil.copyfile(dst1, dstx)\n c = CoalescenceTree(dstx)\n c.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n c.calculate_goodness_of_fit()\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"\n Removes the files from output.\"\n \"\"\"\n 
cls.test.clear_calculations()\n\n def testComparisonDataNoExistError(self):\n c = CoalescenceTree(os.path.join(\"sample\", \"sample.db\"))\n with self.assertRaises(IOError):\n c.import_comparison_data(os.path.join(\"sample\", \"doesnotexist.db\"))\n\n def testFragmentOctaves(self):\n num = self.test.cursor.execute(\n \"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0\"\n \" AND community_reference == 1\"\n ).fetchall()[0][0]\n self.assertEqual(num, 7, msg=\"Fragment octaves not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 \"\n \" AND community_reference == 2\"\n ).fetchall()[0][0]\n self.assertEqual(num, 7, msg=\"Fragment octaves not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 \"\n \" AND community_reference == 1\"\n ).fetchall()[0][0]\n self.assertEqual(num, 3, msg=\"Fragment octaves not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 \"\n \" AND community_reference == 2\"\n ).fetchall()[0][0]\n self.assertEqual(num, 221, msg=\"Fragment octaves not correctly calculated.\")\n\n def testFragmentAbundances(self):\n \"\"\"\n Tests that fragment abundances are produced properly by the fragment detection functions.\n\n \"\"\"\n num = self.test.cursor.execute(\n \"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' \" \" AND community_reference == 1\"\n ).fetchall()[0][0]\n self.assertEqual(num, 9, msg=\"Fragment abundances not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' \" \" AND community_reference == 2\"\n ).fetchall()[0][0]\n self.assertEqual(num, 9, msg=\"Fragment abundances not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' \"\n \" AND community_reference == 1\"\n ).fetchall()[0][0]\n self.assertEqual(num, 9, msg=\"Fragment abundances not correctly calculated.\")\n\n def testSpeciesAbundances(self):\n \"\"\"Tests that the produced species abundances are correct by comparing species richness.\"\"\"\n num = self.test.cursor.execute(\n \"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2\"\n ).fetchall()[0][0]\n self.assertEqual(num, 1029, msg=\"Species abundances not correctly calculated.\")\n num = self.test.cursor.execute(\n \"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1\"\n ).fetchall()[0][0]\n self.assertEqual(num, 884, msg=\"Species abundances not correctly calculated.\")\n\n def testGetOctaves(self):\n \"\"\"Tests getting the octaves.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb4.db\"))\n c.clear_calculations()\n c.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n c.calculate_richness()\n self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb4.db\"))\n c.clear_calculations()\n c.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n c.calculate_richness()\n actual = c.get_octaves_pd().head()\n expected = pd.DataFrame(\n [[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],\n columns=[\"community_reference\", \"octave\", \"richness\"],\n 
)\n assert_frame_equal(actual, expected, check_like=True)\n\n def testSpeciesLocations(self):\n \"\"\"\n Tests that species locations have been correctly assigned.\n \"\"\"\n num = self.test.cursor.execute(\n \"SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 \" \" AND community_reference == 1\"\n ).fetchall()\n self.assertEqual(len(set(num)), 2, msg=\"Species locations not correctly assigned\")\n all_list = self.test.get_species_locations()\n select_list = self.test.get_species_locations(community_reference=1)\n self.assertListEqual([1, 1662, 4359, 1], all_list[0])\n self.assertListEqual([1, 1662, 4359], select_list[0])\n\n def testAlphaDiversity(self):\n \"\"\"\n Tests that alpha diversity is correctly calculated and fetched for each parameter reference\n \"\"\"\n c = CoalescenceTree(os.path.join(\"sample\", \"sample.db\"))\n with self.assertRaises(IOError):\n c.get_alpha_diversity_pd()\n self.assertEqual(9, self.test.get_alpha_diversity(1))\n self.assertEqual(10, self.test.get_alpha_diversity(2))\n expected_alphas_list = []\n for reference in self.test.get_community_references():\n expected_alphas_list.append(\n {\"community_reference\": reference, \"alpha_diversity\": self.test.get_alpha_diversity(reference)}\n )\n expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)\n actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)\n assert_frame_equal(expected_alphas, actual_alphas, check_like=True)\n\n def testBetaDiversity(self):\n \"\"\"\n Tests that beta diversity is correctly calculated and fetched for the reference\n \"\"\"\n c = CoalescenceTree(os.path.join(\"sample\", \"sample.db\"))\n with self.assertRaises(IOError):\n c.get_beta_diversity_pd()\n self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)\n self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)\n expected_betas_list = []\n for reference in self.test.get_community_references():\n expected_betas_list.append(\n {\"community_reference\": reference, \"beta_diversity\": self.test.get_beta_diversity(reference)}\n )\n expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)\n actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)\n assert_frame_equal(expected_betas, actual_betas, check_like=True)\n\n def testGetNumberIndividuals(self):\n \"\"\"Tests that the number of individuals is obtained correctly.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb7.db\"))\n self.assertEqual(1504, c.get_number_individuals(community_reference=1))\n self.assertEqual(12, c.get_number_individuals(fragment=\"P09\", community_reference=1))\n c.wipe_data()\n c.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n with self.assertRaises(IOError):\n c.get_number_individuals(fragment=\"none\")\n with self.assertRaises(IOError):\n c.get_number_individuals()\n\n def testGetFragmentAbundances(self):\n \"\"\"Tests that fragment abundances are correctly obtained.\"\"\"\n c = CoalescenceTree(os.path.join(\"sample\", \"sample3.db\"))\n with self.assertRaises(IOError):\n c.get_fragment_abundances(fragment=\"P09\", reference=1)\n with self.assertRaises(IOError):\n c.get_fragment_abundances_pd()\n abundances = self.test.get_fragment_abundances(fragment=\"P09\", reference=1)\n expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]\n self.assertEqual(expected_abundances, abundances[:10])\n all_abundances = 
self.test.get_all_fragment_abundances()\n expected_abundances2 = [\n [1, \"P09\", 302, 1],\n [1, \"P09\", 303, 1],\n [1, \"P09\", 304, 1],\n [1, \"P09\", 305, 1],\n [1, \"P09\", 306, 1],\n [1, \"P09\", 307, 1],\n [1, \"P09\", 546, 2],\n [1, \"P09\", 693, 1],\n [1, \"P09\", 732, 3],\n [1, \"cerrogalera\", 416, 1],\n ]\n self.assertEqual(expected_abundances2, all_abundances[:10])\n df = pd.DataFrame(\n expected_abundances2, columns=[\"community_reference\", \"fragment\", \"species_id\", \"no_individuals\"]\n )\n actual_df = self.test.get_fragment_abundances_pd().head(n=10)\n assert_frame_equal(df, actual_df, check_like=True)\n\n def testGetFragmentListErrors(self):\n \"\"\"Tests the error is raised when obtaining fragment list.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb8.db\"))\n c.wipe_data()\n with self.assertRaises(IOError):\n c.get_fragment_list()\n\n def testClearGoodnessFit(self):\n \"\"\"Tests that goodness of fit are correctly cleared.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledbx.db\"))\n exec_command = \"SELECT * FROM BIODIVERSITY_METRICS WHERE metric LIKE 'goodness_%'\"\n self.assertTrue(len(c.cursor.execute(exec_command).fetchall()) >= 1)\n c._clear_goodness_of_fit()\n self.assertFalse(len(c.cursor.execute(exec_command).fetchall()) >= 1)\n\n def testGetBiodiversityMetrics(self):\n \"\"\"Tests that biodiversity metrics are correctly obtained from the database.\"\"\"\n c1 = CoalescenceTree(os.path.join(\"sample\", \"sample.db\"))\n with self.assertRaises(IOError):\n c1.get_biodiversity_metrics()\n c2 = CoalescenceTree(os.path.join(\"sample\", \"sample2.db\"))\n\n expected_biodiversity_metrics = pd.DataFrame(\n [\n [1, \"fragment_richness\", \"fragment2\", 129.0, np.NaN, np.NaN],\n [2, \"fragment_richness\", \"fragment2\", 130.0, np.NAN, np.NaN],\n [1, \"fragment_richness\", \"fragment1\", 174.0, np.NaN, np.NaN],\n [2, \"fragment_richness\", \"fragment1\", 175.0, np.NaN, np.NaN],\n [1, \"fragment_richness\", \"whole\", 1163.0, np.NaN, np.NaN],\n [2, \"fragment_richness\", \"whole\", 1170.0, np.NaN, np.NaN],\n ],\n columns=[\"community_reference\", \"metric\", \"fragment\", \"value\", \"simulated\", \"actual\"],\n ).reset_index(drop=True)\n actual_biodiversity_metrics = c2.get_biodiversity_metrics().reset_index(drop=True).fillna(value=np.nan)\n assert_frame_equal(expected_biodiversity_metrics, actual_biodiversity_metrics)\n\n def testRaisesErrorNoFragmentsAlpha(self):\n \"\"\"\n Tests that an error is raised when alpha diversity is calculated without any fragment abundance data\n \"\"\"\n with self.assertRaises(IOError):\n self.test2.calculate_alpha_diversity()\n\n def testRaisesErrorNoFragmentsBeta(self):\n \"\"\"\n Tests that an error is raised when alpha diversity is calculated without any fragment abundance data\n \"\"\"\n with self.assertRaises(IOError):\n self.test2.calculate_beta_diversity()\n\n def testRaisesErrorNoFragmentsRichness(self):\n \"\"\"\n Tests that an error is raised when fragment richness is calculated without any fragment abundance data\n \"\"\"\n with self.assertRaises(IOError):\n self.test2.calculate_fragment_richness()\n\n def testRaisesErrorNoFragmentsOctaves(self):\n \"\"\"\n Tests that an error is raised when fragment richness is calculated without any fragment abundance data\n \"\"\"\n with self.assertRaises(IOError):\n self.test2.calculate_fragment_octaves()\n\n @unittest.skipIf(sys.version[0] != \"3\", \"Skipping Python 3.x tests\")\n def testModelFitting2(self):\n \"\"\"\n Tests that the goodness-of-fit 
calculations are correctly performed.\n \"\"\"\n random.seed(2)\n self.test.calculate_goodness_of_fit()\n self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)\n self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)\n self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)\n\n @unittest.skipIf(sys.version[0] == \"3\", \"Skipping Python 2.x tests\")\n def testModelFitting3(self):\n \"\"\"\n Tests that the goodness-of-fit calculations are correctly performed.\n \"\"\"\n random.seed(2)\n self.test.calculate_goodness_of_fit()\n self.assertAlmostEqual(self.test.get_goodness_of_fit(), 0.30140801329929373, places=6)\n self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_octaves(), 0.0680205429120108, places=6)\n self.assertAlmostEqual(self.test.get_goodness_of_fit_fragment_richness(), 0.9244977999898334, places=6)\n\n def testErrorIfNotApplied(self):\n \"\"\"Tests that an error is raised if outputting is attempted without applying any community parameters.\"\"\"\n c = CoalescenceTree(os.path.join(\"sample\", \"sample.db\"))\n with self.assertRaises(RuntimeError):\n c.output()\n\n def testFragmentNumbersMatching(self):\n \"\"\"Checks behaviour when matching fragment numbers.\"\"\"\n test = CoalescenceTree(os.path.join(\"output\", \"sampledb1.db\"), logging_level=50)\n test.clear_calculations()\n with self.assertRaises(RuntimeError):\n test._check_fragment_numbers_match()\n with self.assertRaises(ValueError):\n test.calculate_fragment_abundances()\n test._check_fragment_numbers_match()\n test.comparison_file = os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\")\n self.assertTrue(test._check_fragment_numbers_match())\n test.fragment_abundances.pop(0)\n self.assertFalse(test._check_fragment_numbers_match())\n\n def testFragmentNumbersEqualisation(self):\n \"\"\"Checks behaviour when equalising fragment numbers.\"\"\"\n test = CoalescenceTree(os.path.join(\"output\", \"sampledb2.db\"), logging_level=50)\n test.clear_calculations()\n test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n test.calculate_fragment_richness()\n self.test._equalise_fragment_number(\"notafrag\", 1)\n test.fragment_abundances[0][2] += 1000\n test._equalise_fragment_number(\"P09\", 1)\n self.assertTrue(test._check_fragment_numbers_match())\n\n def testFragmentNumbersErrors(self):\n \"\"\"Checks behaviour when equalising fragment numbers.\"\"\"\n test = CoalescenceTree(os.path.join(\"output\", \"sampledb3.db\"), logging_level=50)\n test.clear_calculations()\n test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n test.comparison_abundances = None\n with self.assertRaises(ValueError):\n test._equalise_all_fragment_numbers()\n\n def testAdjustBiodiversityMetrics(self):\n \"\"\"Checks that biodiversity metrics are correctly adjusted.\"\"\"\n test = CoalescenceTree(os.path.join(\"output\", \"sampledb5.db\"), logging_level=50)\n test.clear_calculations()\n test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n test.adjust_data()\n\n def testComparisonOctavesModification(self):\n \"\"\"Tests that the comparison database is modified.\"\"\"\n test = CoalescenceTree(os.path.join(\"output\", \"sampledb6.db\"), logging_level=50)\n dst = os.path.join(\"output\", \"PlotBiodiversityMetricsNoAlpha2.db\")\n shutil.copy(os.path.join(\"sample\", \"PlotBiodiversityMetricsNoAlpha.db\"), dst)\n 
test.import_comparison_data(dst)\n test.calculate_comparison_octaves(store=True)\n self.assertTrue(os.path.exists(dst))\n\n @unittest.skipIf(sys.version[0] == \"2\", \"Skipping Python 3.x tests\")\n def testDownsamplingAndRevert(self):\n \"\"\"Tests that downsampling works as intended and can be reverted.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb9.db\"))\n random.seed(a=10, version=3)\n original_individuals = c.get_number_individuals()\n original_richness = c.get_species_richness_pd()\n c.wipe_data()\n with self.assertRaises(ValueError):\n c.downsample(sample_proportion=2.0)\n c.downsample(sample_proportion=0.1)\n c.set_speciation_parameters([0.1, 0.2])\n c.apply()\n new_individuals = c.get_number_individuals()\n self.assertEqual(1452, new_individuals)\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb9.db\"))\n c.revert_downsample()\n c.wipe_data()\n c.set_speciation_parameters([0.1, 0.2])\n c.apply()\n final_individuals = c.get_number_individuals()\n assert_frame_equal(original_richness, c.get_species_richness_pd())\n self.assertEqual(original_individuals, final_individuals)\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertFalse(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n # Now test with NSE sim to ensure correct sampling\n c = CoalescenceTree(os.path.join(\"output\", \"nse_reference1.db\"))\n nse_richness = c.get_species_richness_pd()\n nse_no_individuals = c.get_number_individuals()\n c.wipe_data()\n c.downsample(sample_proportion=0.1)\n c.set_speciation_parameters([0.000001, 0.999999])\n c.apply()\n new_no_individuals = c.get_number_individuals()\n self.assertAlmostEqual(new_no_individuals / nse_no_individuals, 0.1, 5)\n self.assertEqual(1000, c.get_species_richness(reference=2))\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n c = CoalescenceTree(os.path.join(\"output\", \"nse_reference1.db\"))\n c.revert_downsample()\n c.wipe_data()\n c.set_speciation_parameters([0.000001, 0.999999])\n c.apply_incremental()\n c.set_speciation_parameters([0.5])\n c.apply()\n actual_richness = c.get_species_richness_pd()\n assert_frame_equal(nse_richness, actual_richness)\n self.assertEqual(nse_no_individuals, c.get_number_individuals())\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertFalse(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n with self.assertRaises(IOError):\n c.revert_downsample()\n\n @unittest.skipIf(sys.version[0] == \"2\", \"Skipping Python 3.x tests\")\n def testDownsamplingByLocationAndRevert(self):\n \"\"\"Tests that downsampling works as intended and can be reverted.\"\"\"\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb10.db\"))\n random.seed(a=10, version=3)\n original_individuals = c.get_number_individuals()\n original_richness = c.get_species_richness_pd()\n c.wipe_data()\n with self.assertRaises(ValueError):\n c.downsample_at_locations(fragment_csv=os.path.join(\"sample\", \"FragmentsTestFail1.csv\"))\n with self.assertRaises(IOError):\n c.downsample_at_locations(fragment_csv=\"not_a_file.csv\")\n c.downsample_at_locations(fragment_csv=os.path.join(\"sample\", \"FragmentsTest3.csv\"))\n c.set_speciation_parameters([0.1, 0.2])\n c.apply()\n new_individuals = 
c.get_number_individuals()\n self.assertEqual(2, new_individuals)\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb10.db\"))\n c.revert_downsample()\n c.wipe_data()\n c.set_speciation_parameters([0.1, 0.2])\n c.apply()\n final_individuals = c.get_number_individuals()\n assert_frame_equal(original_richness, c.get_species_richness_pd())\n self.assertEqual(original_individuals, final_individuals)\n self.assertTrue(check_sql_table_exist(c.database, \"SPECIES_LIST\"))\n self.assertFalse(check_sql_table_exist(c.database, \"SPECIES_LIST_ORIGINAL\"))\n c = CoalescenceTree(os.path.join(\"output\", \"sampledb10.db\"))\n c.wipe_data()\n c.downsample_at_locations(fragment_csv=os.path.join(\"sample\", \"FragmentsTest4.csv\"), ignore_errors=True)\n c.set_speciation_parameters([0.1, 0.2])\n c.apply()\n new_individuals = c.get_number_individuals()\n self.assertEqual(3, new_individuals)\n\n\nclass TestCoalescenceTreeWriteCsvs(unittest.TestCase):\n \"\"\"Tests that csvs are correctly outputted.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Creates the CoalescenceTree object.\"\"\"\n cls.c = CoalescenceTree(os.path.join(\"sample\", \"nse_reference.db\"))\n\n def testWriteCommunityParameterToCsv(self):\n \"\"\"Tests that community parameters are correctly written to a csv.\"\"\"\n output_csv = os.path.join(\"output\", \"community_parameters1.csv\")\n self.c.write_to_csv(output_csv, \"COMMUNITY_PARAMETERS\")\n self.assertTrue(os.path.exists(output_csv))\n import csv\n\n if sys.version_info[0] < 3: # pragma: no cover\n infile = open(output_csv, \"rb\")\n else:\n infile = open(output_csv, \"r\")\n expected_output = [\n [\"reference\", \"speciation_rate\", \"time\", \"fragments\", \"metacommunity_reference\"],\n [\"1\", \"1e-06\", \"0.0\", \"0\", \"0\"],\n [\"2\", \"0.99999\", \"0.0\", \"0\", \"0\"],\n [\"3\", \"0.5\", \"0.0\", \"0\", \"0\"],\n ]\n actual_output = []\n with infile as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n actual_output.append(row)\n self.assertEqual(expected_output, actual_output)\n with self.assertRaises(IOError):\n self.c.write_to_csv(output_csv, \"COMMUNITY_PARAMETERS\")\n with self.assertRaises(KeyError):\n self.c.write_to_csv(\"notacsv.csv\", \"NOTATABLE\")\n\n def testWritesAllCsvs(self):\n \"\"\"Tests that all csvs write to the output correctly.\"\"\"\n output_dir = os.path.join(\"output\", \"csvdir\")\n if os.path.exists(output_dir):\n os.remove(output_dir)\n self.c.write_all_to_csvs(output_dir, \"out1\")\n expected_tables = [\"COMMUNITY_PARAMETERS\", \"SIMULATION_PARAMETERS\", \"SPECIES_ABUNDANCES\", \"SPECIES_LIST\"]\n for table in expected_tables:\n self.assertTrue(os.path.exists(os.path.join(output_dir, \"out1_{}.csv\".format(table))))\n for file in os.listdir(output_dir):\n if \".csv\" in file:\n self.assertIn(file, [\"out1_{}.csv\".format(x) for x in expected_tables])\n self.c.write_all_to_csvs(output_dir, \"out2.csv\")\n for table in expected_tables:\n self.assertTrue(os.path.exists(os.path.join(output_dir, \"out2_{}.csv\".format(table))))\n self.c.write_all_to_csvs(output_dir, \"out3.\")\n for table in expected_tables:\n self.assertTrue(os.path.exists(os.path.join(output_dir, \"out3_{}.csv\".format(table))))\n\n\nclass TestCoalescenceTreeSpeciesDistances(unittest.TestCase):\n \"\"\"Tests analysis is performed correctly.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Sets up 
the Coalescence object test case.\n        \"\"\"\n        dst = os.path.join(\"output\", \"sampledb1.db\")\n        if os.path.exists(dst):\n            os.remove(dst)\n        shutil.copyfile(os.path.join(\"sample\", \"sample.db\"), dst)\n        cls.test = CoalescenceTree(dst)\n        cls.test.clear_calculations()\n        cls.test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetrics.db\"))\n        cls.test.calculate_species_distance_similarity()\n\n    def testSpeciesDistanceSimilarity(self):\n        \"\"\"\n        Tests that the species distance similarity function works as intended.\n        \"\"\"\n        mean = self.test.cursor.execute(\n            \"SELECT value FROM BIODIVERSITY_METRICS WHERE community_reference == 1 AND \"\n            \"metric == 'mean_distance_between_individuals'\"\n        ).fetchone()[0]\n        self.assertAlmostEqual(mean, 5.423769507803121, places=5)\n        species_distances = self.test.get_species_distance_similarity(community_reference=1)\n        # for distance, similar in species_distances:\n        # \tself.assertLessEqual(similar, dissimilar)\n        self.assertListEqual(species_distances[0], [0, 11])\n        self.assertListEqual(species_distances[1], [1, 274])\n        self.assertListEqual(species_distances[2], [2, 289])\n\n\nclass TestCoalescenceTreeAnalyseIncorrectComparison(unittest.TestCase):\n    \"\"\"\n    Tests errors are raised correctly for incorrect comparison data.\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"\n        Sets up the Coalescence object test case.\n        \"\"\"\n        random.seed(10)\n        dst = os.path.join(\"output\", \"sampledb2.db\")\n        if os.path.exists(dst):\n            os.remove(dst)\n        shutil.copyfile(os.path.join(\"sample\", \"sample.db\"), dst)\n        cls.test = CoalescenceTree(logging_level=40)\n        cls.test.set_database(dst)\n        cls.test.import_comparison_data(os.path.join(\"sample\", \"PlotBiodiversityMetricsNoAlpha.db\"))\n        cls.test.calculate_comparison_octaves(False)\n        cls.test.clear_calculations()\n        cls.test.calculate_fragment_richness()\n        cls.test.calculate_fragment_octaves()\n        cls.test.calculate_octaves_error()\n        cls.test.calculate_alpha_diversity()\n        cls.test.calculate_beta_diversity()\n        cls.test2 = CoalescenceTree()\n        cls.test2.set_database(os.path.join(\"sample\", \"sample_nofrag.db\"))\n\n    @classmethod\n    def tearDownClass(cls):\n        \"\"\"\n        Removes the files from output.\n        \"\"\"\n        cls.test.clear_calculations()\n\n    def testRaisesErrorMismatchParameters(self):\n        \"\"\"\n        Tests that an error is raised when there is a parameter mismatch\n        \"\"\"\n        with self.assertRaises(ValueError):\n            self.test.calculate_goodness_of_fit()\n\n\nclass TestSimulationAnalysisTemporal(unittest.TestCase):\n    \"\"\"Tests that applying multiple times works as expected.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Generates the analysis object.\"\"\"\n        src = os.path.join(\"sample\", \"sample2.db\")\n        dst = os.path.join(\"output\", \"sample2.db\")\n        if not os.path.exists(dst):\n            shutil.copy(src, dst)\n        cls.tree = CoalescenceTree()\n        cls.tree.set_database(dst)\n        cls.tree.wipe_data()\n\n    def testTimesWrongFormatError(self):\n        \"\"\"Tests that an error is raised when the times are in the wrong format.\"\"\"\n        with self.assertRaises(TypeError):\n            self.tree.set_speciation_parameters([0.4, 0.6], times=[0.1, 0.2, \"notafloat\"])\n        with self.assertRaises(TypeError):\n            # noinspection PyTypeChecker\n            self.tree.set_speciation_parameters([0.4, 0.6], times=\"notafloat\")\n        self.tree.times = []\n        self.tree.set_speciation_parameters([0.4, 0.6], times=[0, 1, 10])\n        self.assertEqual([0.0, 1.0, 10.0], self.tree.times)\n\n\nclass TestSimulationAnalysis(unittest.TestCase):\n    
\"\"\"\n Tests that the simulation can perform all required analyses, and that the correct errors are thrown if the object\n does not exist.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Copies the sample databases and applies a basic set of community parameters.\"\"\"\n src = os.path.join(\"sample\", \"sample2.db\")\n dst = os.path.join(\"output\", \"sample2.db\")\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copy(src, dst)\n cls.tree = CoalescenceTree(logging_level=50)\n cls.tree.set_database(dst)\n cls.tree.wipe_data()\n cls.tree.set_speciation_parameters(\n speciation_rates=[0.5, 0.7],\n record_spatial=\"T\",\n record_fragments=os.path.join(\"sample\", \"FragmentsTest.csv\"),\n sample_file=os.path.join(\"sample\", \"SA_samplemaskINT.tif\"),\n )\n cls.tree.apply()\n cls.tree.calculate_fragment_richness()\n cls.tree.calculate_fragment_octaves()\n np.random.seed(100)\n\n def testSetDatabaseErrors(self):\n \"\"\"Tests that the set database errors are correctly raised.\"\"\"\n sim = Simulation()\n c = CoalescenceTree()\n with self.assertRaises(RuntimeError):\n c.set_database(sim)\n c = CoalescenceTree()\n with self.assertRaises(IOError):\n c.set_database(os.path.join(\"sample\", \"failsampledoesntexist.db\"))\n\n def testFragmentConfigNoExistError(self):\n \"\"\"Tests that an error is raised if the fragment config file does not exist.\"\"\"\n tree = CoalescenceTree(self.tree.file)\n with self.assertRaises(IOError):\n tree.set_speciation_parameters(\n speciation_rates=[0.5, 0.7],\n record_spatial=\"T\",\n record_fragments=os.path.join(\"sample\", \"notafragmentconfig.csv\"),\n sample_file=os.path.join(\"sample\", \"SA_samplemaskINT.tif\"),\n )\n with self.assertRaises(IOError):\n tree.set_speciation_parameters(\n speciation_rates=[0.5, 0.7],\n record_spatial=\"T\",\n record_fragments=os.path.join(\"sample\", \"example_historical_fine.tif\"),\n sample_file=os.path.join(\"sample\", \"SA_samplemaskINT.tif\"),\n )\n\n def testReadsFragmentsRichness(self):\n \"\"\"\n Tests that the fragment richness can be read correctly\n \"\"\"\n sim_params = self.tree.get_simulation_parameters()\n expected_params = dict(\n seed=9,\n task=1,\n output_dir=\"output\",\n speciation_rate=0.5,\n sigma=2.828427,\n tau=2.0,\n deme=1,\n sample_size=0.1,\n max_time=2.0,\n dispersal_relative_cost=1.0,\n min_num_species=1,\n habitat_change_rate=0.0,\n gen_since_historical=200.0,\n time_config_file=\"null\",\n coarse_map_file=\"sample/SA_sample_coarse.tif\",\n coarse_map_x=35,\n coarse_map_y=41,\n coarse_map_x_offset=11,\n coarse_map_y_offset=14,\n coarse_map_scale=1.0,\n fine_map_file=\"sample/SA_sample_fine.tif\",\n fine_map_x=13,\n fine_map_y=13,\n fine_map_x_offset=0,\n fine_map_y_offset=0,\n sample_file=\"sample/SA_samplemaskINT.tif\",\n grid_x=13,\n grid_y=13,\n sample_x=13,\n sample_y=13,\n sample_x_offset=0,\n sample_y_offset=0,\n historical_coarse_map=\"none\",\n historical_fine_map=\"none\",\n sim_complete=1,\n dispersal_method=\"normal\",\n m_probability=0.0,\n cutoff=0.0,\n landscape_type=\"closed\",\n protracted=0,\n min_speciation_gen=0.0,\n max_speciation_gen=0.0,\n dispersal_map=\"none\",\n )\n for key in sim_params.keys():\n self.assertEqual(\n sim_params[key],\n expected_params[key],\n msg=\"Error in {}: {} != {}\".format(key, sim_params[key], expected_params[key]),\n )\n fragment2_richness = [\"fragment2\", 1, 129]\n self.assertEqual(self.tree.get_fragment_richness(fragment=\"fragment2\", reference=1), 129)\n self.assertEqual(self.tree.get_fragment_richness(fragment=\"fragment1\", 
reference=2), 175)\n octaves = self.tree.get_fragment_richness()\n self.assertListEqual(fragment2_richness, [list(x) for x in octaves if x[0] == \"fragment2\" and x[1] == 1][0])\n expected_fragment_richness = []\n for reference in self.tree.get_community_references():\n for fragment in self.tree.get_fragment_list(reference):\n fragment_richness = self.tree.get_fragment_richness(fragment=fragment, reference=reference)\n expected_fragment_richness.append(\n {\"fragment\": fragment, \"community_reference\": reference, \"fragment_richness\": fragment_richness}\n )\n expected_fragment_richness_df = (\n pd.DataFrame(expected_fragment_richness)\n .sort_values(by=[\"fragment\", \"community_reference\"])\n .reset_index(drop=True)\n )\n actual_fragment_richness = self.tree.get_fragment_richness_pd().reset_index(drop=True)\n assert_frame_equal(expected_fragment_richness_df, actual_fragment_richness, check_like=True)\n\n def testGetsFragmentList(self):\n \"\"\"\n Tests that fetching the list of fragments from FRAGMENT_ABUNDANCES is as expected\n \"\"\"\n fragment_list = self.tree.get_fragment_list()\n expected_list = [\"fragment1\", \"fragment2\"]\n self.assertListEqual(expected_list, fragment_list)\n\n def testReadsFragmentAbundances(self):\n \"\"\"\n Tests that the fragment abundances are correctly read\n \"\"\"\n expected_abundances = [\n [610, 1],\n [611, 1],\n [612, 1],\n [613, 1],\n [614, 1],\n [615, 1],\n [616, 1],\n [617, 1],\n [618, 1],\n [619, 1],\n ]\n actual_abundances = self.tree.get_species_abundances(fragment=\"fragment2\", reference=1)\n for i, each in enumerate(expected_abundances):\n self.assertListEqual(actual_abundances[i], each)\n with self.assertRaises(ValueError):\n self.tree.get_species_abundances(fragment=\"fragment2\")\n expected_fragment_abundances_list = []\n for reference in self.tree.get_community_references():\n for fragment in self.tree.get_fragment_list(reference):\n fragment_abundances = self.tree.get_fragment_abundances(fragment=fragment, reference=reference)\n for species_id, abundance in fragment_abundances:\n expected_fragment_abundances_list.append(\n {\n \"fragment\": fragment,\n \"community_reference\": reference,\n \"species_id\": species_id,\n \"no_individuals\": abundance,\n }\n )\n expected_fragment_abundances = (\n pd.DataFrame(expected_fragment_abundances_list)\n .sort_values(by=[\"fragment\", \"community_reference\", \"species_id\"])\n .reset_index(drop=True)\n )\n actual_fragment_abundances = (\n self.tree.get_fragment_abundances_pd()\n .sort_values(by=[\"fragment\", \"community_reference\", \"species_id\"])\n .reset_index(drop=True)\n )\n assert_frame_equal(expected_fragment_abundances, actual_fragment_abundances, check_like=True)\n\n def testFragmentRichnessRaiseError(self):\n \"\"\"\n Tests that the correct errors are raised when no fragment exists with that name, or with the specified\n speciation rate, or time. 
Also checks SyntaxErrors and sqlite3.Errors when no FRAGMENT_RICHNESS table\n exists.\n \"\"\"\n failtree = CoalescenceTree()\n failtree.set_database(os.path.join(\"sample\", \"failsample.db\"))\n with self.assertRaises(IOError):\n failtree.get_fragment_richness()\n with self.assertRaises(IOError):\n failtree.get_fragment_richness_pd()\n with self.assertRaises(IOError):\n self.tree.get_fragment_richness(fragment=\"fragment4\", reference=1)\n with self.assertRaises(SyntaxError):\n self.tree.get_fragment_richness(fragment=\"fragment4\")\n with self.assertRaises(SyntaxError):\n self.tree.get_fragment_richness(reference=1)\n\n def testReadsFragmentOctaves(self):\n \"\"\"\n Tests that the fragment octaves can be read correctly.\n \"\"\"\n octaves = self.tree.get_fragment_octaves(fragment=\"fragment2\", reference=1)\n octaves2 = self.tree.get_fragment_octaves(fragment=\"fragment1\", reference=1)\n all_octaves = self.tree.get_fragment_octaves()\n desired = [\"fragment1\", 1, 0, 173]\n self.assertListEqual([0, 128], octaves[0])\n self.assertListEqual([0, 173], octaves2[0])\n self.assertListEqual(desired, [x for x in all_octaves if x[0] == \"fragment1\" and x[1] == 1 and x[2] == 0][0])\n expected_fragment_octaves_list = []\n for reference in self.tree.get_community_references():\n fragment_list = self.tree.get_fragment_list(reference)\n fragment_list.append(\"whole\")\n for fragment in fragment_list:\n try:\n octaves = self.tree.get_fragment_octaves(fragment=fragment, reference=reference)\n for octave, richness in octaves:\n expected_fragment_octaves_list.append(\n {\n \"fragment\": fragment,\n \"community_reference\": reference,\n \"octave\": octave,\n \"richness\": richness,\n }\n )\n except RuntimeError:\n continue\n expected_fragment_octaves = (\n pd.DataFrame(expected_fragment_octaves_list)\n .sort_values([\"fragment\", \"community_reference\", \"octave\"], axis=0)\n .reset_index(drop=True)\n )\n actual_fragment_octaves = (\n self.tree.get_fragment_octaves_pd()\n .sort_values([\"fragment\", \"community_reference\", \"octave\"], axis=0)\n .reset_index(drop=True)\n )\n assert_frame_equal(expected_fragment_octaves, actual_fragment_octaves, check_like=True)\n\n def testFragmentOctavesRaiseError(self):\n \"\"\"\n Tests that the correct errors are raised for different situations for reading fragment octaves\n \"\"\"\n failtree = CoalescenceTree()\n try:\n failtree.set_database(\"sample/failsample.db\")\n except sqlite3.Error:\n pass\n with self.assertRaises(sqlite3.Error):\n failtree.get_fragment_octaves(fragment=\"fragment4\", reference=100)\n with self.assertRaises(RuntimeError):\n self.tree.get_fragment_octaves(fragment=\"fragment4\", reference=100)\n with self.assertRaises(SyntaxError):\n self.tree.get_fragment_octaves(fragment=\"fragment4\")\n with self.assertRaises(SyntaxError):\n self.tree.get_fragment_octaves(reference=100)\n\n def testFragmentSampling(self):\n \"\"\"\n Tests that sampling from fragments is accurate.\n \"\"\"\n self.assertEqual(\n 10,\n self.tree.sample_fragment_richness(\n fragment=\"fragment1\", number_of_individuals=10, n=1, community_reference=2\n ),\n )\n self.assertEqual(\n 10,\n self.tree.sample_fragment_richness(\n fragment=\"fragment2\", number_of_individuals=10, n=10, community_reference=2\n ),\n )\n\n def testLandscapeSampling(self):\n \"\"\"Tests that the sampling from the landscape works as intended.\"\"\"\n number_dict = {\"fragment1\": 3, \"fragment2\": 10}\n np.random.seed(100)\n self.assertEqual(\n 13, 
self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)\n )\n self.assertAlmostEqual(\n 99.9, self.tree.sample_landscape_richness(number_of_individuals=100, n=10, community_reference=1), places=3\n )\n\n def testRaisesSamplingErrors(self):\n \"\"\"Tests that sampling errors are correctly raised\"\"\"\n number_dict = {\"fragment1\": 3000000, \"fragment2\": 10}\n with self.assertRaises(KeyError):\n self.assertEqual(\n 13, self.tree.sample_landscape_richness(number_of_individuals=number_dict, n=1, community_reference=2)\n )\n number_dict2 = {\"fragment\": 10, \"fragment2\": 10}\n with self.assertRaises(KeyError):\n self.assertEqual(\n 13, self.tree.sample_landscape_richness(number_of_individuals=number_dict2, n=1, community_reference=2)\n )\n\n def testSpeciesRichness(self):\n \"\"\"Tests that the simulation species richness is read correctly.\"\"\"\n actual_species_richness = (\n self.tree.get_species_richness_pd().sort_values(by=[\"community_reference\"]).reset_index(drop=True)\n )\n expected_species_richness_list = []\n for reference in self.tree.get_community_references():\n expected_species_richness_list.append(\n {\"community_reference\": reference, \"richness\": self.tree.get_species_richness(reference=reference)}\n )\n expected_species_richness = pd.DataFrame(expected_species_richness_list)\n assert_frame_equal(actual_species_richness, expected_species_richness, check_like=True)\n\n def testOctaves(self):\n \"\"\"Tests that the simulation octave classes are correctly calculated.\"\"\"\n actual_species_octaves = (\n self.tree.get_octaves_pd().sort_values(by=[\"community_reference\", \"octave\"]).reset_index(drop=True)\n )\n expected_species_octaves_list = []\n for reference in self.tree.get_community_references():\n for octave, richness in self.tree.get_octaves(reference):\n expected_species_octaves_list.append(\n {\"community_reference\": reference, \"octave\": octave, \"richness\": richness}\n )\n expected_species_octaves = pd.DataFrame(expected_species_octaves_list)\n assert_frame_equal(actual_species_octaves, expected_species_octaves, check_like=True)\n\n\nclass TestMetacommunityApplication(unittest.TestCase):\n \"\"\"\n Tests that a metacommunity can be applied correctly under the three different scenarios. 
Note that this does not\n test edge cases, just that the parameters are correctly stored and the different application methods work as\n intended.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Initialises the three database files to use.\"\"\"\n src = os.path.join(\"sample\", \"sample.db\")\n for i in range(6):\n dst = os.path.join(\"output\", \"sample_{}.db\".format(i))\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copy2(src, dst)\n\n def testMetacommunityAddingInvalidParameters(self):\n \"\"\"Tests that adding invalid parameter for a metacommunity raises the appropriate errors.\"\"\"\n tree = CoalescenceTree(os.path.join(\"output\", \"sample_0.db\"))\n tree.wipe_data()\n with self.assertRaises(IOError):\n tree.get_metacommunity_parameters_pd()\n tree.set_speciation_parameters([0.1, 0.2])\n for size, spec, opt, ref in [\n [0, 0.1, \"simulated\", None],\n [10, 0.0, \"analytical\", None],\n [None, None, \"analytical\", None],\n [10, 0.0, \"path/to/file\", None],\n [0, 0.0, \"path/to/file\", None],\n [0, 0.0, \"path/to/not/a/file.db\", 1],\n ]:\n with self.assertRaises(ValueError):\n tree.add_metacommunity_parameters(\n metacommunity_size=size,\n metacommunity_speciation_rate=spec,\n metacommunity_option=opt,\n metacommunity_reference=ref,\n )\n with self.assertRaises(IOError):\n tree.add_metacommunity_parameters(metacommunity_option=\"not/a/file/db.db\", metacommunity_reference=1)\n\n def testMetacommunitySimulation(self):\n \"\"\"Tests that a simulated metacommunity works as intended.\"\"\"\n tree = CoalescenceTree(os.path.join(\"output\", \"sample_1.db\"))\n tree.wipe_data()\n tree.set_speciation_parameters(\n [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option=\"simulated\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option=\"simulated\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option=\"simulated\"\n )\n tree.apply()\n params_1 = tree.get_metacommunity_parameters(1)\n params_2 = tree.get_metacommunity_parameters(2)\n params_3 = tree.get_metacommunity_parameters(3)\n self.assertEqual(10000, params_1[\"metacommunity_size\"])\n self.assertEqual(0.001, params_1[\"speciation_rate\"])\n self.assertEqual(\"simulated\", params_1[\"option\"])\n self.assertEqual(0, params_1[\"external_reference\"])\n self.assertEqual(15000, params_2[\"metacommunity_size\"])\n self.assertEqual(0.1, params_2[\"speciation_rate\"])\n self.assertEqual(\"simulated\", params_2[\"option\"])\n self.assertEqual(0, params_2[\"external_reference\"])\n self.assertEqual(100000, params_3[\"metacommunity_size\"])\n self.assertEqual(0.001, params_3[\"speciation_rate\"])\n self.assertEqual(\"simulated\", params_3[\"option\"])\n self.assertEqual(0, params_3[\"external_reference\"])\n self.assertEqual(51, tree.get_species_richness(1))\n self.assertEqual(47, tree.get_species_richness(2))\n self.assertEqual(681, tree.get_species_richness(3))\n self.assertEqual(783, tree.get_species_richness(4))\n self.assertEqual(247, tree.get_species_richness(5))\n self.assertEqual(241, tree.get_species_richness(6))\n expected_metacommunity_parameters_list = []\n for reference in tree.get_community_references():\n try:\n params = tree.get_metacommunity_parameters(reference)\n params[\"reference\"] = reference\n expected_metacommunity_parameters_list.append(params)\n except KeyError:\n continue\n expected_metacommunity_parameters = 
pd.DataFrame(expected_metacommunity_parameters_list).sort_values(\n [\"reference\"]\n )\n actual_metacommunity_parameters = tree.get_metacommunity_parameters_pd().sort_values([\"reference\"])\n assert_frame_equal(expected_metacommunity_parameters, actual_metacommunity_parameters, check_like=True)\n\n def testMetacommunityAnalytical(self):\n \"\"\"Tests that an analytical metacommunity works as intended.\"\"\"\n tree = CoalescenceTree(os.path.join(\"output\", \"sample_2.db\"))\n tree.wipe_data()\n tree.set_speciation_parameters(\n [0.1, 0.2], metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option=\"analytical\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_size=15000, metacommunity_speciation_rate=0.1, metacommunity_option=\"analytical\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_size=100000, metacommunity_speciation_rate=0.001, metacommunity_option=\"analytical\"\n )\n tree.apply()\n params_1 = tree.get_metacommunity_parameters(1)\n params_2 = tree.get_metacommunity_parameters(2)\n params_3 = tree.get_metacommunity_parameters(3)\n self.assertEqual(10000, params_1[\"metacommunity_size\"])\n self.assertEqual(0.001, params_1[\"speciation_rate\"])\n self.assertEqual(\"analytical\", params_1[\"option\"])\n self.assertEqual(0, params_1[\"external_reference\"])\n self.assertEqual(15000, params_2[\"metacommunity_size\"])\n self.assertEqual(0.1, params_2[\"speciation_rate\"])\n self.assertEqual(\"analytical\", params_2[\"option\"])\n self.assertEqual(0, params_2[\"external_reference\"])\n self.assertEqual(100000, params_3[\"metacommunity_size\"])\n self.assertEqual(0.001, params_3[\"speciation_rate\"])\n self.assertEqual(\"analytical\", params_3[\"option\"])\n self.assertEqual(0, params_3[\"external_reference\"])\n self.assertEqual(51, tree.get_species_richness(1))\n self.assertEqual(57, tree.get_species_richness(2))\n self.assertEqual(694, tree.get_species_richness(3))\n self.assertEqual(760, tree.get_species_richness(4))\n self.assertEqual(222, tree.get_species_richness(5))\n self.assertEqual(234, tree.get_species_richness(6))\n\n def testMetacommunityExternal(self):\n \"\"\"Tests that an external metacommunity works as intended.\"\"\"\n tree = CoalescenceTree(os.path.join(\"output\", \"sample_3.db\"))\n tree.wipe_data()\n tree.set_speciation_parameters([0.1, 0.2], metacommunity_option=os.path.join(\"sample\", \"nse_reference.db\"))\n tree.add_metacommunity_parameters(\n metacommunity_option=os.path.join(\"sample\", \"nse_reference.db\"), metacommunity_reference=2\n )\n tree.apply()\n params_1 = tree.get_metacommunity_parameters(1)\n params_2 = tree.get_metacommunity_parameters(2)\n self.assertEqual(0, params_1[\"metacommunity_size\"])\n self.assertEqual(0.0, params_1[\"speciation_rate\"])\n self.assertEqual(os.path.join(\"sample\", \"nse_reference.db\"), params_1[\"option\"])\n self.assertEqual(1, params_1[\"external_reference\"])\n self.assertEqual(0, params_2[\"metacommunity_size\"])\n self.assertEqual(0.0, params_2[\"speciation_rate\"])\n self.assertEqual(os.path.join(\"sample\", \"nse_reference.db\"), params_2[\"option\"])\n self.assertEqual(2, params_2[\"external_reference\"])\n self.assertEqual(1, tree.get_species_richness(1))\n self.assertEqual(1, tree.get_species_richness(2))\n self.assertEqual(850, tree.get_species_richness(3))\n self.assertEqual(975, tree.get_species_richness(4))\n\n def testMetacommunityAnalyticalMethodDetection(self):\n \"\"\"Tests that the analytical method detection works correctly.\"\"\"\n tree = 
CoalescenceTree(os.path.join(\"output\", \"sample_4.db\"))\n tree.wipe_data()\n tree.set_speciation_parameters(\n [0.1, 0.2], metacommunity_size=110000, metacommunity_speciation_rate=0.5, metacommunity_option=\"none\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_speciation_rate=0.5, metacommunity_size=120000, metacommunity_option=\"none\"\n )\n tree.apply()\n params_1 = tree.get_metacommunity_parameters(1)\n params_2 = tree.get_metacommunity_parameters(2)\n self.assertEqual(110000, params_1[\"metacommunity_size\"])\n self.assertEqual(0.5, params_1[\"speciation_rate\"])\n self.assertEqual(\"analytical\", params_1[\"option\"])\n self.assertEqual(120000, params_2[\"metacommunity_size\"])\n self.assertEqual(0.5, params_2[\"speciation_rate\"])\n self.assertEqual(\"analytical\", params_2[\"option\"])\n\n def testMetacommunitySimulatedMethodDetection(self):\n \"\"\"Tests that the simulated method detection works correctly.\"\"\"\n tree = CoalescenceTree(os.path.join(\"output\", \"sample_5.db\"))\n tree.wipe_data()\n tree.set_speciation_parameters(\n [0.1, 0.2], metacommunity_size=1000, metacommunity_speciation_rate=0.5, metacommunity_option=\"none\"\n )\n tree.add_metacommunity_parameters(\n metacommunity_speciation_rate=0.5, metacommunity_size=2000, metacommunity_option=\"none\"\n )\n tree.apply()\n params_1 = tree.get_metacommunity_parameters(1)\n params_2 = tree.get_metacommunity_parameters(2)\n self.assertEqual(1000, params_1[\"metacommunity_size\"])\n self.assertEqual(0.5, params_1[\"speciation_rate\"])\n self.assertEqual(\"simulated\", params_1[\"option\"])\n self.assertEqual(2000, params_2[\"metacommunity_size\"])\n self.assertEqual(0.5, params_2[\"speciation_rate\"])\n self.assertEqual(\"simulated\", params_2[\"option\"])\n\n\n@skipLongTest\nclass TestMetacommunityApplicationSpeciesAbundances(unittest.TestCase):\n \"\"\"Tests that the metacommunity application produces the expected species abundance distribution.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Run a non-spatial sim and apply a metacommunity.\"\"\"\n cls.sim = Simulation()\n cls.sim.set_simulation_parameters(\n seed=11, task=110, output_directory=\"output\", min_speciation_rate=0.1, spatial=False, deme=20541\n )\n cls.sim.run()\n cls.ct = CoalescenceTree(cls.sim)\n cls.ct.wipe_data()\n cls.ct.set_speciation_parameters(speciation_rates=0.1)\n cls.ct.add_metacommunity_parameters(\n metacommunity_option=\"analytical\", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005\n )\n cls.ct.add_metacommunity_parameters(\n metacommunity_option=\"simulated\", metacommunity_size=1000000, metacommunity_speciation_rate=0.00005\n )\n # This just tests that it doesn't take forever and produces a sensible output\n cls.ct.add_metacommunity_parameters(\n metacommunity_option=\"analytical\", metacommunity_size=1000000000, metacommunity_speciation_rate=0.1\n )\n cls.ct.apply()\n\n def testRichnessMatchness(self):\n \"\"\"Tests that the species richness is roughly equivalent between the two methods.\"\"\"\n self.assertAlmostEqual(244, self.ct.get_species_richness(2), delta=10)\n self.assertAlmostEqual(self.ct.get_species_richness(1), self.ct.get_species_richness(2), delta=30)\n self.assertEqual(5212, self.ct.get_species_richness(3))\n\n def testSpeciesAbundances(self):\n \"\"\"Tests the species abundance distribution is roughly equivalent between the two methods.\"\"\"\n sad_1 = [x[1] for x in self.ct.get_species_abundances(reference=1)]\n sad_2 = [x[1] for x in self.ct.get_species_abundances(reference=2)]\n mean_1 
= sum(sad_1) / len(sad_1)\n mean_2 = sum(sad_2) / len(sad_2)\n # Check the mean abundance is roughly equivalent\n self.assertAlmostEqual(mean_1, mean_2, delta=10)\n # Check that the variances are roughly equivalent\n var_list_1 = [abs(x - mean_1) for x in sad_1]\n var_list_2 = [abs(x - mean_2) for x in sad_2]\n var_1 = sum(var_list_1) / len(var_list_1)\n var_2 = sum(var_list_2) / len(var_list_2)\n self.assertAlmostEqual(var_1, var_2, delta=5)\n expected_abundances_list = []\n for reference in self.ct.get_community_references():\n for species_id, abundance in self.ct.get_species_abundances(reference=reference):\n expected_abundances_list.append(\n {\"community_reference\": reference, \"species_id\": species_id, \"no_individuals\": abundance}\n )\n expected_abundances = pd.DataFrame(expected_abundances_list)\n actual_abundances = self.ct.get_species_abundances_pd()\n assert_frame_equal(actual_abundances, expected_abundances, check_like=True)\n\n\nclass TestMetacommunityApplicationOrdering(unittest.TestCase):\n \"\"\"Tests that the ordering of adding parameters to the metacommunity does not matter.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Generates the test databases.\"\"\"\n src = os.path.join(\"sample\", \"sample3.db\")\n for i in [1, 2]:\n dst = os.path.join(\"output\", \"sample_order_{}.db\".format(i))\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copy(src, dst)\n src = os.path.join(\"sample\", \"sample5.db\")\n for i in range(3, 6):\n dst = os.path.join(\"output\", \"sample_order_{}.db\".format(i))\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copy(src, dst)\n cls.c1 = CoalescenceTree(os.path.join(\"output\", \"sample_order_1.db\"))\n cls.c2 = CoalescenceTree(os.path.join(\"output\", \"sample_order_2.db\"))\n cls.proc1 = CoalescenceTree(os.path.join(\"output\", \"sample_order_3.db\"))\n cls.proc2 = CoalescenceTree(os.path.join(\"output\", \"sample_order_4.db\"))\n cls.proc3 = CoalescenceTree(os.path.join(\"output\", \"sample_order_5.db\"))\n cls.c1.set_speciation_parameters(\n [0.1, 0.5, 0.9],\n metacommunity_speciation_rate=0.001,\n metacommunity_option=\"simulated\",\n metacommunity_size=10000,\n )\n cls.c1.apply()\n cls.c2.set_speciation_parameters([0.1, 0.5, 0.9])\n cls.c2.add_metacommunity_parameters(\n metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option=\"simulated\"\n )\n cls.c2.apply()\n cls.proc1.set_speciation_parameters(\n [0.1, 0.5, 0.9],\n protracted_speciation_min=5,\n protracted_speciation_max=1000,\n metacommunity_option=\"simulated\",\n metacommunity_speciation_rate=0.001,\n metacommunity_size=10000,\n )\n cls.proc1.apply()\n cls.proc2.set_speciation_parameters([0.1, 0.5, 0.9])\n cls.proc2.add_metacommunity_parameters(\n metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option=\"simulated\"\n )\n cls.proc2.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)\n cls.proc2.apply()\n cls.proc3.set_speciation_parameters([0.1, 0.5, 0.9])\n cls.proc3.add_protracted_parameters(min_speciation_gen=5, max_speciation_gen=1000)\n cls.proc3.add_metacommunity_parameters(\n metacommunity_size=10000, metacommunity_speciation_rate=0.001, metacommunity_option=\"simulated\"\n )\n cls.proc3.apply()\n\n def testEquivalentMethodsMatch(self):\n \"\"\"Tests that equivalent methods of applying metacommunities produce equivalent results.\"\"\"\n for i in range(1, 4):\n self.assertEqual(self.c1.get_species_richness(i), self.c2.get_species_richness(i))\n 
self.assertEqual(self.proc1.get_species_richness(i), self.proc2.get_species_richness(i))\n self.assertEqual(self.proc2.get_species_richness(i), self.proc3.get_species_richness(i))\n\n def testMultipleProtractedError(self):\n \"\"\"Tests that adding multiple protracted speciation parameters raises the correct error.\"\"\"\n with self.assertRaises(ValueError):\n self.proc2.add_multiple_protracted_parameters()\n\n\nclass TestProtractedSpeciationEquality(unittest.TestCase):\n \"\"\"Tests that analysis performs as expected when protracted speciation parameters match the minimums.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Copy the sample database.\"\"\"\n dst = os.path.join(\"output\", \"sample_protracted3.db\")\n shutil.copy(os.path.join(\"sample\", \"sample3.db\"), dst)\n cls.ct = CoalescenceTree(dst)\n cls.ct.wipe_data()\n\n def testApplyEqualParameters(self):\n \"\"\"Tests that equal protracted parameters can be applied\"\"\"\n self.ct.set_speciation_parameters(\n [0.001, 0.1], protracted_speciation_min=100.0, protracted_speciation_max=10000.0\n )\n self.ct.apply()\n self.assertEqual(1, self.ct.get_species_richness(1))\n self.assertEqual(3, self.ct.get_species_richness(2))\n\n\nclass TestSpeciesAgesCalculations(unittest.TestCase):\n \"\"\"Tests that operations associated with the species ages operate as expected\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Copies the sample databases and applies a basic set of community parameters.\"\"\"\n src = os.path.join(\"sample\", \"sample6.db\")\n dst = os.path.join(\"output\", \"sample6.db\")\n if os.path.exists(dst):\n os.remove(dst)\n shutil.copy(src, dst)\n cls.dst_file = dst\n\n def testSmallSimulation(self):\n tree = CoalescenceTree(logging_level=50)\n tree.set_database(self.dst_file)\n with self.assertRaises(IOError):\n _ = tree.get_species_ages()\n with self.assertRaises(IOError):\n _ = tree.get_species_ages_pd()\n tree.wipe_data()\n with self.assertRaises(IOError):\n _ = tree.get_species_ages()\n with self.assertRaises(IOError):\n _ = tree.get_species_ages_pd()\n tree.set_speciation_parameters(\n speciation_rates=[0.000001, 0.0001],\n record_spatial=False,\n record_ages=True,\n )\n tree.apply()\n self.assertTrue(check_sql_table_exist(tree.database, \"SPECIES_AGES\"))\n expected_df = pd.read_csv(os.path.join(\"sample\", \"expected_species_ages.csv\"))\n actual_df = tree.get_species_ages_pd().reset_index(drop=True)\n assert_frame_equal(expected_df, actual_df)\n for community_ref, group in expected_df.groupby([\"community_reference\"]):\n actual_output = sorted(tree.get_species_ages(community_ref), key=lambda x: x[0])\n expected_output = group.drop(columns=[\"community_reference\"]).sort_values(by=[\"species_id\"]).values.tolist()\n for ex, act in zip(expected_output, actual_output):\n self.assertEqual(ex[0], act[0])\n self.assertAlmostEqual(ex[1], act[1], delta=0.0000001)\n"
] | [
[
"pandas.DataFrame",
"numpy.random.seed",
"pandas.testing.assert_frame_equal"
]
] |
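The test-suite sample above leans on one pattern throughout: build an expected `pandas.DataFrame` by looping over community references, then compare it to the library-produced frame with `pandas.testing.assert_frame_equal` (the three APIs recorded for this row). Below is a minimal, self-contained sketch of that pattern; the `get_species_richness*` functions are hypothetical stand-ins for the `CoalescenceTree` getters, not part of the original code.

```python
import pandas as pd
from pandas.testing import assert_frame_equal

# Hypothetical stand-ins for the CoalescenceTree getters used in the tests above.
def get_community_references():
    return [1, 2]

def get_species_richness(reference):
    return {1: 51, 2: 47}[reference]

def get_species_richness_pd():
    return pd.DataFrame(
        [
            {"community_reference": 1, "richness": 51},
            {"community_reference": 2, "richness": 47},
        ]
    )

# Build the expected frame row by row, exactly as the tests do.
expected = pd.DataFrame(
    [
        {"community_reference": r, "richness": get_species_richness(r)}
        for r in get_community_references()
    ]
)
actual = (
    get_species_richness_pd()
    .sort_values(by=["community_reference"])
    .reset_index(drop=True)
)
# check_like=True ignores column ordering, matching the calls in the tests above.
assert_frame_equal(expected, actual, check_like=True)
print("frames match")
```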
cmougan/OODBenchmark | [
"e5d7b9540840afe64f6a00139cbc41a44ed01a80"
] | [
"xAIbenchmark.py"
] | [
"# %%\nfrom pmlb import fetch_data\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import cross_val_predict, KFold\nfrom sklearn.metrics import mean_squared_error, roc_auc_score\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nimport warnings\nimport re\nimport traceback\nfrom pmlb import classification_dataset_names, regression_dataset_names\nfrom benchmark import benchmark_experiment\nfrom sklearn.linear_model import Lasso, LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nimport warnings\nfrom fairtools.xaiUtils import ShapEstimator\nimport xgboost\n\n\nwarnings.filterwarnings(\"ignore\")\n\n# %%\ndef benchmark_experiment(datasets: list, model, classification: str = \"classification\"):\n\n assert classification in [\n \"classification\",\n \"regression\",\n \"explainableAI\",\n ], \"Classification type introduced --{}-- does not match: classification,regression,explainableAI\".format(\n classification\n )\n\n if classification == \"classification\":\n extension = \"_clas\"\n elif classification == \"regression\":\n extension = \"_reg\"\n elif classification == \"explainableAI\":\n extension = \"_explain\"\n else:\n raise \"Classification type not contained\"\n\n results = defaultdict()\n for i, dataset in enumerate(datasets):\n try:\n # Initialise the scaler\n standard_scaler = StandardScaler()\n\n # Load the dataset and split it\n X, y = fetch_data(dataset, return_X_y=True, local_cache_dir=\"data/\")\n\n # Scale the dataset\n X = standard_scaler.fit_transform(X)\n if classification == False:\n y = standard_scaler.fit_transform(y.reshape(-1, 1))\n\n # Back to dataframe\n X = pd.DataFrame(X, columns=[\"Var %d\" % (i + 1) for i in range(X.shape[1])])\n data = X.copy()\n data[\"target\"] = y\n\n # Min and max data limits for the experiment\n if X.shape[0] < 100:\n continue\n if X.shape[0] > 100_000:\n continue\n # Train test splitting points\n fracc = 0.33\n oneThird = int(data.shape[0] * fracc)\n twoThird = data.shape[0] - int(data.shape[0] * fracc)\n\n for idx, col in tqdm(enumerate(X.columns), total=len(X.columns)):\n\n # Sort data on the column\n data = data.sort_values(col).reset_index(drop=True).copy()\n\n # Train Test Split\n data_sub = data.iloc[:oneThird]\n data_train = data.iloc[oneThird:twoThird]\n data_up = data.iloc[twoThird:]\n\n X_tot = data.drop(columns=\"target\")\n X_tr = data_train.drop(columns=\"target\")\n X_sub = data_sub.drop(columns=\"target\")\n X_up = data_up.drop(columns=\"target\")\n\n y_tot = data[[\"target\"]].target.values\n y_tr = data_train[[\"target\"]].target.values\n y_sub = data_sub[[\"target\"]].target.values\n y_up = data_up[[\"target\"]].target.values\n\n # Error Calculation\n if classification == \"classification\":\n ## Test predictions\n pred_test = cross_val_predict(\n estimator=model,\n X=X_tr,\n y=y_tr,\n cv=KFold(n_splits=5, shuffle=True, random_state=0),\n method=\"predict_proba\",\n )[:, 1]\n\n ## Train\n model.fit(X_tr, y_tr)\n pred_train = model.predict_proba(X_tr)[:, 1]\n\n ## OOD\n X_ood = X_sub.append(X_up)\n y_ood = np.concatenate((y_sub, y_up))\n pred_ood = model.predict_proba(X_ood)[:, 1]\n\n train_error = roc_auc_score(y_tr, pred_train)\n test_error = roc_auc_score(y_tr, pred_test)\n ood_error = roc_auc_score(y_ood, pred_ood)\n generalizationError = test_error - train_error\n ood_performance = ood_error - test_error\n elif classification == 
\"regression\":\n ## Test predictions\n pred_test = cross_val_predict(\n estimator=model,\n X=X_tr,\n y=y_tr,\n cv=KFold(n_splits=5, shuffle=True, random_state=0),\n )\n\n ## Train\n model.fit(X_tr, y_tr)\n pred_train = model.predict(X_tr)\n\n ## OOD\n X_ood = X_sub.append(X_up)\n y_ood = np.concatenate((y_sub, y_up))\n pred_ood = model.predict(X_ood)\n\n train_error = mean_squared_error(pred_train, y_tr)\n test_error = mean_squared_error(pred_test, y_tr)\n ood_error = mean_squared_error(pred_ood, y_ood)\n\n generalizationError = test_error - train_error\n ood_performance = ood_error - test_error\n elif classification == \"explainableAI\":\n # Explainer predictor\n se = ShapEstimator(model=xgboost.XGBRegressor())\n shap_pred_tr = cross_val_predict(se, X_tr, y_tr, cv=3)\n ## Test predictions\n\n pred_test = cross_val_predict(\n estimator=model,\n X=shap_pred_tr,\n y=y_tr,\n cv=KFold(n_splits=5, shuffle=True, random_state=0),\n )\n\n ## Train\n se.fit(X_tr, y_tr)\n model.fit(shap_pred_tr, y_tr)\n pred_train = model.predict(shap_pred_tr)\n\n ## Generate OOD Shap data\n X_ood = X_sub.append(X_up)\n y_ood = np.concatenate((y_sub, y_up))\n shap_pred_ood = se.predict(X_ood)\n\n ## OOD\n pred_ood = model.predict(shap_pred_ood)\n\n train_error = mean_squared_error(pred_train, y_tr)\n test_error = mean_squared_error(pred_test, y_tr)\n ood_error = mean_squared_error(pred_ood, y_ood)\n\n generalizationError = test_error - train_error\n ood_performance = ood_error - test_error\n\n # Append Results\n model_name = str(type(model)).split(\".\")[-1]\n model_name = re.sub(\"[^A-Za-z0-9]+\", \"\", model_name)\n name = dataset + \"_column_\" + col\n results[name] = [\n train_error,\n test_error,\n ood_error,\n generalizationError,\n ood_performance,\n model_name,\n ]\n\n except Exception:\n print(traceback.format_exc())\n print(\"Not Working:\", dataset)\n print(\"Dataset shape:\", len(dataset))\n pass\n\n df = pd.DataFrame(data=results).T\n df.columns = [\n \"trainError\",\n \"testError\",\n \"oodError\",\n \"generalizationError\",\n \"oodPerformance\",\n \"model\",\n ]\n df.to_csv(\"results/\" + model_name + extension + \".csv\")\n\n\n# %%\nregression_dataset_names_sample = regression_dataset_names[:10]\n# %%\n\nmodelitos = [\n GradientBoostingRegressor(),\n]\nfor m in modelitos:\n benchmark_experiment(\n datasets=regression_dataset_names_sample,\n model=m,\n classification=\"explainableAI\",\n )\n\n# %%\n"
] | [
[
"sklearn.metrics.mean_squared_error",
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.cross_val_predict",
"sklearn.preprocessing.StandardScaler",
"numpy.concatenate",
"sklearn.ensemble.GradientBoostingRegressor"
]
] |
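The `benchmark_experiment` function in the row above builds its out-of-distribution split by sorting the data on a single feature, training on the middle of the distribution, and treating both tails as OOD. A minimal sketch of that split on synthetic data follows; the variable names mirror the source, and `pd.concat` stands in for the now-deprecated `DataFrame.append` the original uses.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(300, 3)), columns=["Var 1", "Var 2", "Var 3"])
data = X.copy()
data["target"] = 2 * data["Var 1"] + rng.normal(size=300)

col = "Var 1"  # the feature that induces the covariate shift
data = data.sort_values(col).reset_index(drop=True)
one_third = int(len(data) * 0.33)
two_third = len(data) - one_third

data_sub = data.iloc[:one_third]             # lower tail  -> OOD
data_train = data.iloc[one_third:two_third]  # middle      -> train/test
data_up = data.iloc[two_third:]              # upper tail  -> OOD

# pd.concat replaces the deprecated DataFrame.append used in the source.
ood = pd.concat([data_sub, data_up])
X_tr, y_tr = data_train.drop(columns="target"), data_train["target"].values
X_ood, y_ood = ood.drop(columns="target"), ood["target"].values
print(X_tr.shape, X_ood.shape)
```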
andresmasegosa/PRML-CoreSets | [
"fb768debb15e3ff6f5b65b7224915a41c1493f3d"
] | [
"[email protected]/bayesian_pca_DR.py"
] | [
"import numpy as np\nfrom prml.feature_extractions.pca import PCA\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\nfrom sklearn.preprocessing import StandardScaler\n\n\nclass BayesianPCA_DR(PCA):\n\n def _clusteringError(self, X, kmeans):\n sum = 0\n for i in range(0, kmeans.cluster_centers_.shape[0]):\n a = X[kmeans.labels_ == i, :] - kmeans.cluster_centers_[i, :]\n sum += np.sqrt((a * a).sum(axis=1)).sum(axis=0)\n return sum\n\n def _random(self, X, n_clusters):\n\n centers_X = X[np.random.choice(X.shape[0], n_clusters, replace=False),:]\n centers_XX = centers_X**2\n weights = np.repeat(X.shape[0]/n_clusters,n_clusters)\n\n self.X_dr = {'X': centers_X, 'XX': centers_XX,\n 'W': weights}\n\n def _clusterSS(self, X, n_clusters):\n XX = X ** 2\n XJoin = np.concatenate((X, XX), axis=1)\n self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(XJoin)\n weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])\n D=X.shape[1]\n self.X_dr = {'X': self.kmeans.cluster_centers_[:, 0:D], 'XX': self.kmeans.cluster_centers_[:, D:2 * D], 'W': weights}\n self.clusterError = self._clusteringError(XJoin,self.kmeans)\n\n def _cluster(self, X, n_clusters):\n self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(X)\n weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])\n self.X_dr = {'X': self.kmeans.cluster_centers_, 'XX': self.kmeans.cluster_centers_ ** 2, 'W': weights}\n\n # def _clusterSS(self, X, n_clusters):\n # scaler = StandardScaler()\n # XX = X ** 2\n # XJoin = np.concatenate((X, XX), axis=1)\n # self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(scaler.fit_transform(XJoin))\n # weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])\n # D=X.shape[1]\n # self.kmeans.cluster_centers_=scaler.inverse_transform(self.kmeans.cluster_centers_)\n # self.X_dr = {'X': self.kmeans.cluster_centers_[:, 0:D], 'XX': self.kmeans.cluster_centers_[:, D:2 * D], 'W': weights}\n #\n # def _cluster(self, X, n_clusters):\n # scaler = StandardScaler()\n # self.kmeans = MiniBatchKMeans(n_clusters=n_clusters).fit(scaler.fit_transform(X))\n # weights = np.asarray([sum(self.kmeans.labels_ == x) for x in range(0, n_clusters)])\n # self.kmeans.cluster_centers_=scaler.inverse_transform(self.kmeans.cluster_centers_)\n # self.X_dr = {'X': self.kmeans.cluster_centers_, 'XX': self.kmeans.cluster_centers_ ** 2, 'W': weights}\n\n def eigen(self, X_dr, *arg):\n sample_size = np.sum(X_dr['W'])\n X = self.X_dr['W'][:,None]*self.X_dr['X']\n n_features = X.shape[1]\n if sample_size >= n_features:\n cov = np.cov(X, rowvar=False)\n values, vectors = np.linalg.eigh(cov)\n index = n_features - self.n_components\n else:\n cov = np.cov(X)\n values, vectors = np.linalg.eigh(cov)\n vectors = (X.T @ vectors) / np.sqrt(sample_size * values)\n index = sample_size - self.n_components\n self.I = np.eye(self.n_components)\n if index == 0:\n self.var = 0\n else:\n self.var = np.mean(values[:index])\n\n self.W = vectors[:, index:].dot(np.sqrt(np.diag(values[index:]) - self.var * self.I))\n self.__M = self.W.T @ self.W + self.var * self.I\n self.C = self.W @ self.W.T + self.var * np.eye(n_features)\n if index == 0:\n self.Cinv = np.linalg.inv(self.C)\n else:\n self.Cinv = np.eye(n_features) / np.sqrt(self.var) - self.W @ np.linalg.inv(self.__M) @ self.W.T / self.var\n\n def fit(self, X, iter_max=100, initial=\"random\", n_clusters=10, cluster_method=\"SS\"):\n \"\"\"\n empirical bayes estimation of pca parameters\n\n Parameters\n ----------\n X : 
(sample_size, n_features) ndarray\n            input data\n        iter_max : int\n            maximum number of em steps\n\n        Returns\n        -------\n        mean : (n_features,) ndarray\n            sample mean of the input data\n        W : (n_features, n_components) ndarray\n            projection matrix\n        var : float\n            variance of observation noise\n        \"\"\"\n        if cluster_method== \"SS\":\n            self._clusterSS(X,n_clusters)\n        elif cluster_method== \"NoSS\":\n            self._cluster(X,n_clusters)\n        elif cluster_method == \"random\":\n            self._random(X,n_clusters)\n\n        initial_list = [\"random\", \"eigen\"]\n        self.mean = np.sum(self.X_dr['W'][:,None]*self.X_dr['X'], axis=0)/sum(self.X_dr['W'])\n        self.I = np.eye(self.n_components)\n        if initial not in initial_list:\n            print(\"available initializations are {}\".format(initial_list))\n        if initial == \"random\":\n            self.W = np.eye(np.size(self.X_dr['X'], 1), self.n_components)\n            self.var = 1.\n        elif initial == \"eigen\":\n            self.eigen(self.X_dr)\n        self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)\n\n\n        for i in range(iter_max):\n            W = np.copy(self.W)\n            Ez, Ezz = self._expectation(self.X_dr['X']-self.mean)\n            self._maximization(self.X_dr, Ez, Ezz)\n            #self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)\n            if np.allclose(W, self.W):\n                break\n        self.n_iter = i + 1\n        self.C = self.W @ self.W.T + self.var * np.eye(np.size(self.X_dr['X'], 1))\n        self.Cinv = np.linalg.inv(self.C)\n\n    def _maximization(self, X_dr, Ez, Ezz):\n        X_mean = (X_dr['X']-self.mean)\n        self.W = (X_mean*X_dr['W'][:,None]).T @ Ez @ np.linalg.inv(np.sum(Ezz*X_dr['W'][:,None,None], axis=0) + self.var * np.diag(self.alpha))\n        self.var = np.sum(\n            (np.mean((X_dr['XX'] - 2*X_dr['X']*self.mean + self.mean ** 2), axis=-1)\n            #(np.mean((X_mean** 2), axis=-1)\n            - 2 * np.mean(Ez @ self.W.T * X_mean, axis=-1)\n            + np.trace((Ezz @ self.W.T @ self.W).T)/ len(self.mean))*X_dr['W'])/sum(X_dr['W'])\n        self.var=max(self.var,0.000001)\n\n    def maximize(self, D, Ez, Ezz):\n        self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))\n        self.var = np.mean(\n            np.mean(D ** 2, axis=-1)\n            - 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)\n            + np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.eye",
"numpy.allclose",
"numpy.diag",
"numpy.linalg.inv",
"numpy.linalg.eigh",
"numpy.repeat",
"numpy.copy",
"numpy.size",
"numpy.random.choice",
"numpy.trace",
"sklearn.cluster.MiniBatchKMeans",
"numpy.mean",
"numpy.concatenate",
"numpy.cov"
]
] |
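The `_clusterSS` method in the row above compresses the data into a weighted coreset of sufficient statistics: it clusters the concatenation `[X, X**2]` with `MiniBatchKMeans`, keeps the centroids, and weights each centroid by its cluster size. A minimal sketch of that construction on synthetic data (the synthetic `X` and the cluster count are assumptions for illustration):

```python
import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))
n_clusters = 10

XX = X ** 2
XJoin = np.concatenate((X, XX), axis=1)  # stack first and second moments
kmeans = MiniBatchKMeans(n_clusters=n_clusters, n_init=3).fit(XJoin)
weights = np.asarray([np.sum(kmeans.labels_ == k) for k in range(n_clusters)])

D = X.shape[1]
X_dr = {
    "X": kmeans.cluster_centers_[:, :D],        # per-cluster mean of x
    "XX": kmeans.cluster_centers_[:, D:2 * D],  # per-cluster mean of x**2
    "W": weights,                               # cluster sizes as weights
}
assert X_dr["W"].sum() == X.shape[0]  # every point is accounted for exactly once
```

The weighted centroids then drive the EM updates in `fit`/`_maximization`, so each EM step costs O(n_clusters) instead of O(sample_size).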
WERimagin/transformers | [
"cc7d14511c647f8147494df72f8b0575015e37ab"
] | [
"tests/test_data_collator.py"
] | [
"import unittest\n\nfrom transformers import AutoTokenizer, is_torch_available\nfrom transformers.testing_utils import require_torch, slow\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n DataCollatorForLanguageModeling,\n DataCollatorForNextSentencePrediction,\n DataCollatorForPermutationLanguageModeling,\n DataCollatorForSOP,\n GlueDataset,\n GlueDataTrainingArguments,\n LineByLineTextDataset,\n LineByLineWithSOPTextDataset,\n TextDataset,\n TextDatasetForNextSentencePrediction,\n default_data_collator,\n )\n\n\nPATH_SAMPLE_TEXT = \"./tests/fixtures/sample_text.txt\"\nPATH_SAMPLE_TEXT_DIR = \"./tests/fixtures/tests_samples/wiki_text\"\n\n\n@require_torch\nclass DataCollatorIntegrationTest(unittest.TestCase):\n def test_default_with_dict(self):\n features = [{\"label\": i, \"inputs\": [0, 1, 2, 3, 4, 5]} for i in range(8)]\n batch = default_data_collator(features)\n self.assertTrue(batch[\"labels\"].equal(torch.tensor(list(range(8)))))\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 6]))\n\n # With label_ids\n features = [{\"label_ids\": [0, 1, 2], \"inputs\": [0, 1, 2, 3, 4, 5]} for i in range(8)]\n batch = default_data_collator(features)\n self.assertTrue(batch[\"labels\"].equal(torch.tensor([[0, 1, 2]] * 8)))\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 6]))\n\n # Features can already be tensors\n features = [{\"label\": i, \"inputs\": torch.randint(10, [10])} for i in range(8)]\n batch = default_data_collator(features)\n self.assertTrue(batch[\"labels\"].equal(torch.tensor(list(range(8)))))\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 10]))\n\n # Labels can already be tensors\n features = [{\"label\": torch.tensor(i), \"inputs\": torch.randint(10, [10])} for i in range(8)]\n batch = default_data_collator(features)\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n self.assertTrue(batch[\"labels\"].equal(torch.tensor(list(range(8)))))\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 10]))\n\n def test_default_with_no_labels(self):\n features = [{\"label\": None, \"inputs\": [0, 1, 2, 3, 4, 5]} for i in range(8)]\n batch = default_data_collator(features)\n self.assertTrue(\"labels\" not in batch)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 6]))\n\n # With label_ids\n features = [{\"label_ids\": None, \"inputs\": [0, 1, 2, 3, 4, 5]} for i in range(8)]\n batch = default_data_collator(features)\n self.assertTrue(\"labels\" not in batch)\n self.assertEqual(batch[\"inputs\"].shape, torch.Size([8, 6]))\n\n @slow\n def test_default_classification(self):\n MODEL_ID = \"bert-base-cased-finetuned-mrpc\"\n tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)\n data_args = GlueDataTrainingArguments(\n task_name=\"mrpc\", data_dir=\"./tests/fixtures/tests_samples/MRPC\", overwrite_cache=True\n )\n dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=\"dev\")\n data_collator = default_data_collator\n batch = data_collator(dataset.features)\n self.assertEqual(batch[\"labels\"].dtype, torch.long)\n\n @slow\n def test_default_regression(self):\n MODEL_ID = \"distilroberta-base\"\n tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)\n data_args = GlueDataTrainingArguments(\n task_name=\"sts-b\", data_dir=\"./tests/fixtures/tests_samples/STS-B\", overwrite_cache=True\n )\n dataset = 
GlueDataset(data_args, tokenizer=tokenizer, mode=\"dev\")\n data_collator = default_data_collator\n batch = data_collator(dataset.features)\n self.assertEqual(batch[\"labels\"].dtype, torch.float)\n\n @slow\n def test_lm_tokenizer_without_padding(self):\n tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)\n # ^ causal lm\n\n dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)\n examples = [dataset[i] for i in range(len(dataset))]\n with self.assertRaises(ValueError):\n # Expect error due to padding token missing on gpt2:\n data_collator(examples)\n\n dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((2, 512)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((2, 512)))\n\n @slow\n def test_lm_tokenizer_with_padding(self):\n tokenizer = AutoTokenizer.from_pretrained(\"distilroberta-base\")\n data_collator = DataCollatorForLanguageModeling(tokenizer)\n # ^ masked lm\n\n dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((31, 107)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((31, 107)))\n\n dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((2, 512)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((2, 512)))\n\n @slow\n def test_plm(self):\n tokenizer = AutoTokenizer.from_pretrained(\"xlnet-base-cased\")\n data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)\n # ^ permutation lm\n\n dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((31, 112)))\n self.assertEqual(batch[\"perm_mask\"].shape, torch.Size((31, 112, 112)))\n self.assertEqual(batch[\"target_mapping\"].shape, torch.Size((31, 112, 112)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((31, 112)))\n\n dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((2, 512)))\n self.assertEqual(batch[\"perm_mask\"].shape, torch.Size((2, 512, 512)))\n self.assertEqual(batch[\"target_mapping\"].shape, torch.Size((2, 512, 512)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((2, 512)))\n\n example = [torch.randint(5, [5])]\n with self.assertRaises(ValueError):\n # Expect error due to odd sequence length\n data_collator(example)\n\n @slow\n def test_nsp(self):\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n data_collator = DataCollatorForNextSentencePrediction(tokenizer)\n\n dataset = TextDatasetForNextSentencePrediction(tokenizer, 
file_path=PATH_SAMPLE_TEXT, block_size=512)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n\n # Since there are randomly generated false samples, the total number of samples is not fixed.\n total_samples = batch[\"input_ids\"].shape[0]\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"token_type_ids\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"masked_lm_labels\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"next_sentence_label\"].shape, torch.Size((total_samples,)))\n\n @slow\n def test_sop(self):\n tokenizer = AutoTokenizer.from_pretrained(\"albert-base-v2\")\n data_collator = DataCollatorForSOP(tokenizer)\n\n dataset = LineByLineWithSOPTextDataset(tokenizer, file_dir=PATH_SAMPLE_TEXT_DIR, block_size=512)\n examples = [dataset[i] for i in range(len(dataset))]\n batch = data_collator(examples)\n self.assertIsInstance(batch, dict)\n\n # Since there are randomly generated false samples, the total number of samples is not fixed.\n total_samples = batch[\"input_ids\"].shape[0]\n self.assertEqual(batch[\"input_ids\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"token_type_ids\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"labels\"].shape, torch.Size((total_samples, 512)))\n self.assertEqual(batch[\"sentence_order_label\"].shape, torch.Size((total_samples,)))\n"
] | [
[
"torch.tensor",
"torch.Size",
"torch.randint"
]
] |
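The first test in the row above (`test_default_with_dict`) reduces to a short runnable pattern: pass a list of feature dicts to `transformers.default_data_collator` and check the dtype and shape of the collated batch. A minimal sketch, using the same feature values as the test (requires `torch` and `transformers`):

```python
import torch
from transformers import default_data_collator

# Eight examples, each with an integer label and a fixed-length input sequence.
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)

# "label" keys are renamed to "labels"; integer labels collate to torch.long.
assert batch["labels"].dtype == torch.long
assert batch["labels"].equal(torch.tensor(list(range(8))))
assert batch["inputs"].shape == torch.Size([8, 6])  # batch x sequence length
print({k: tuple(v.shape) for k, v in batch.items()})
```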
lucko515/cnn-raccoon | [
"e1c46544372751d82cc0c0f9cb2218d881a21f70"
] | [
"examples/tensorflow_example.py"
] | [
"import tensorflow as tf\n\nmodel = tf.keras.models.Sequential([\n # YOUR CODE HERE\n tf.keras.layers.BatchNormalization(input_shape=(32, 32, 3)),\n tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(units=128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\")\n ])\n\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"acc\"])\n\nfrom tensorflow.keras.datasets import cifar10\n\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n\nfrom cnn_raccoon import inspector\ninspector(model=model, images=X_train[:10], number_of_classes=10, engine=\"keras\")\n"
] | [
[
"tensorflow.keras.datasets.cifar10.load_data",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D"
]
] |
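The row above builds a Keras CNN and hands it, untrained, straight to `cnn_raccoon.inspector`. A minimal sketch of a sanity-check variant that first fits for one epoch on a small CIFAR-10 slice; the trimmed-down layer stack here is an assumption for brevity, not the original model:

```python
import tensorflow as tf
from tensorflow.keras.datasets import cifar10

(X_train, y_train), _ = cifar10.load_data()

# A reduced version of the architecture above (assumption: fewer conv blocks).
model = tf.keras.models.Sequential([
    tf.keras.layers.BatchNormalization(input_shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
    tf.keras.layers.MaxPool2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["acc"])

# One epoch on a small slice: just enough to confirm the pipeline runs end to end.
model.fit(X_train[:256], y_train[:256], epochs=1, batch_size=32)

# As in the source file, the model can then be passed to the inspector:
# from cnn_raccoon import inspector
# inspector(model=model, images=X_train[:10], number_of_classes=10, engine="keras")
```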
krystophny/chaospy | [
"18ff6c4fc56c632825e53fb24e17de51a7febd7d"
] | [
"chaospy/distributions/collection/f.py"
] | [
"\"\"\"(Non-central) F distribution.\"\"\"\nimport numpy\nfrom scipy import special\n\nfrom ..baseclass import Dist\nfrom ..operators.addition import Add\n\n\nclass f(Dist):\n \"\"\"F distribution.\"\"\"\n\n def __init__(self, dfn, dfd, nc):\n Dist.__init__(self, dfn=dfn, dfd=dfd, nc=nc)\n\n def _pdf(self, x, dfn, dfd, nc):\n n1, n2 = dfn, dfd\n term = -nc/2.+nc*n1*x/(2*(n2+n1*x)) + special.gammaln(n1/2.)+special.gammaln(1+n2/2.)\n term -= special.gammaln((n1+n2)/2.)\n Px = numpy.exp(term)\n Px *= n1**(n1/2.) * n2**(n2/2.) * x**(n1/2.-1)\n Px *= (n2+n1*x)**(-(n1+n2)/2.)\n Px *= special.assoc_laguerre(-nc*n1*x/(2.*(n2+n1*x)), n2/2., n1/2.-1)\n Px /= special.beta(n1/2., n2/2.)\n return Px\n\n def _cdf(self, x, dfn, dfd, nc):\n return special.ncfdtr(dfn, dfd, nc, x)\n\n def _ppf(self, q, dfn, dfd, nc):\n return special.ncfdtri(dfn, dfd, nc, q)\n\n def _bnd(self, x, dfn, dfd, nc):\n return 0.0, self._ppf(1-1e-10, dfn, dfd, nc)\n\n\nclass F(Add):\n \"\"\"\n (Non-central) F or Fisher-Snedecor distribution.\n\n Args:\n n (float, Dist) : Degres of freedom for numerator\n m (float, Dist) : Degres of freedom for denominator\n scale (float, Dist) : Scaling parameter\n shift (float, Dist) : Location parameter\n nc (float, Dist) : Non-centrality parameter\n\n Examples:\n >>> distribution = chaospy.F(3, 3, 2, 1, 1)\n >>> print(distribution)\n F(m=3, n=3, nc=1, scale=2, shift=1)\n >>> q = numpy.linspace(0, 1, 6)[1:-1]\n >>> print(numpy.around(distribution.inv(q), 4))\n [1.9336 2.9751 4.7028 8.8521]\n >>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))\n [0.2 0.4 0.6 0.8]\n >>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))\n [0.2277 0.1572 0.0837 0.027 ]\n >>> print(numpy.around(distribution.sample(4), 4))\n [ 5.4212 1.5739 25.7656 3.5586]\n >>> print(distribution.mom(1) > 10**8) # undefined\n True\n \"\"\"\n\n def __init__(self, n=1, m=1, scale=1, shift=0, nc=0):\n self._repr = {\"n\": n, \"m\": m, \"scale\": scale, \"shift\": shift, \"nc\": nc}\n Add.__init__(self, left=f(n, m, nc)*scale, right=shift)\n"
] | [
[
"scipy.special.gammaln",
"scipy.special.beta",
"scipy.special.assoc_laguerre",
"numpy.exp",
"scipy.special.ncfdtri",
"scipy.special.ncfdtr"
]
] |
amgrigoriev/daal4py | [
"97fbe7a9181410dac348dc724178e8605492e3c4"
] | [
"tests/test_estimators.py"
] | [
"#*******************************************************************************\n# Copyright 2014-2020 Intel Corporation\n# All Rights Reserved.\n#\n# This software is licensed under the Apache License, Version 2.0 (the\n# \"License\"), the following terms apply:\n#\n# You may not use this file except in compliance with the License. You may\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#*******************************************************************************\n\nimport unittest\n\nfrom sklearn.utils.estimator_checks import check_estimator\nimport sklearn.utils.estimator_checks\n\nfrom daal4py import __daal_run_version__\ndaal_run_version = tuple(map(int, (__daal_run_version__[0:4], __daal_run_version__[4:8])))\n\nfrom daal4py.sklearn.neighbors import KNeighborsClassifier\nfrom daal4py.sklearn.ensemble import RandomForestClassifier\nfrom daal4py.sklearn.ensemble import RandomForestRegressor\nfrom daal4py.sklearn.ensemble import GBTDAALClassifier\nfrom daal4py.sklearn.ensemble import GBTDAALRegressor\nfrom daal4py.sklearn.ensemble import AdaBoostClassifier\n\nfrom daal4py import __daal_link_version__ as dv\ndaal_version = tuple(map(int, (dv[0:4], dv[4:8])))\n\n\ndef check_version(rule, target):\n if not isinstance(rule[0], type(target)):\n if rule > target:\n return False\n else:\n for rule_item in range(len(rule)):\n if rule[rule_item] > target:\n return False\n else:\n if rule[rule_item][0]==target[0]:\n break\n return True\n\ndef _replace_and_save(md, fns, replacing_fn):\n \"\"\"\n Replaces functions in `fns` list in `md` module with `replacing_fn`.\n\n Returns the dictionary with functions that were replaced.\n \"\"\"\n saved = dict()\n for check_f in fns:\n try:\n fn = getattr(md, check_f)\n setattr(md, check_f, replacing_fn)\n saved[check_f] = fn\n except:\n pass\n return saved\n\n\ndef _restore_from_saved(md, saved_dict):\n \"\"\"\n Restores functions in `md` that were replaced in the function above.\n \"\"\"\n for check_f in saved_dict:\n setattr(md, check_f, saved_dict[check_f])\n\n\nclass Test(unittest.TestCase):\n def test_KNeighborsClassifier(self):\n check_estimator(KNeighborsClassifier)\n\n @unittest.skipUnless(check_version(((2019,0),(2021, 107)), daal_version), \"not supported in this library version\")\n def test_RandomForestClassifier(self):\n # check_methods_subset_invariance fails.\n # Issue is created:\n # https://github.com/IntelPython/daal4py/issues/129\n # Skip the test\n def dummy(*args, **kwargs):\n pass\n\n md = sklearn.utils.estimator_checks\n saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)\n check_estimator(RandomForestClassifier)\n _restore_from_saved(md, saved)\n\n def test_RandomForestRegressor(self):\n # check_fit_idempotent is known to fail with DAAL's decision\n # forest regressor, due to different partitioning of data\n # between threads from run to run.\n # Hence skip that test\n def dummy(*args, **kwargs):\n pass\n md = sklearn.utils.estimator_checks\n saved = _replace_and_save(md, ['check_methods_subset_invariance', 'check_dict_unchanged'], dummy)\n check_estimator(RandomForestRegressor)\n _restore_from_saved(md, saved)\n\n def 
test_GBTDAALClassifier(self):\n        check_estimator(GBTDAALClassifier)\n\n    def test_GBTDAALRegressor(self):\n        def dummy(*args, **kwargs):\n            pass\n\n        md = sklearn.utils.estimator_checks\n        # predictions were observed to differ slightly between two identical calls in this test\n        saved = _replace_and_save(md, ['check_estimators_data_not_an_array'], dummy)\n        check_estimator(GBTDAALRegressor)\n        _restore_from_saved(md, saved)\n\n    @unittest.skipIf(daal_run_version < (2020, 0), \"not supported in this library version\")\n    def test_AdaBoostClassifier(self):\n        check_estimator(AdaBoostClassifier)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
] | [
[
"sklearn.utils.estimator_checks.check_estimator"
]
] |
dvtrung/dl-torch | [
"b49e57d10d32bb223e2d7643f2579ccc32c63a9a"
] | [
"dlex/datasets/nlp/utils.py"
] | [
"\"\"\"NLP Dataset\"\"\"\r\nimport os\r\nimport re\r\nfrom typing import List, Union, Dict, Tuple\r\n\r\nimport nltk\r\nimport unicodedata\r\nimport numpy as np\r\nfrom dlex.configs import ModuleConfigs\r\nfrom dlex.utils.logging import logger\r\n\r\n\r\n# nltk.download('punkt')\r\n\r\n\r\n# Turn a Unicode string to plain ASCII, thanks to\r\n# https://stackoverflow.com/a/518232/2809427\r\n\r\n\r\ndef unicodeToAscii(s):\r\n return ''.join(\r\n c for c in unicodedata.normalize('NFD', s)\r\n if unicodedata.category(c) != 'Mn'\r\n )\r\n\r\n\r\ndef load_tkn_to_idx(filename):\r\n tkn_to_idx = {}\r\n fo = open(filename, encoding='utf-8')\r\n for line in fo:\r\n line = line.strip()\r\n if line == \"\":\r\n continue\r\n tkn_to_idx[line] = len(tkn_to_idx)\r\n fo.close()\r\n return tkn_to_idx\r\n\r\n\r\ndef normalize_lower(sentence: str):\r\n return sentence.strip().lower()\r\n\r\n\r\ndef normalize_lower_alphanumeric(sentence: str):\r\n s = sentence.strip().lower()\r\n s = re.sub(\"[^a-z0-9\\uAC00-\\uD7A3]+\", \" \", s)\r\n return s\r\n\r\n\r\ndef normalize_string_ascii(sentence):\r\n \"\"\"\r\n :param str sentence:\r\n :return: normalized sentence, separated by space\r\n :rtype str\r\n \"\"\"\r\n # x = re.sub(\"[^ a-zA-Z0-9\\uAC00-\\uD7A3]+\", \" \", x)\r\n # x = re.sub(\"[\\u3040-\\u30FF]+\", \"\\u3042\", x) # convert Hiragana and Katakana to あ\r\n # x = re.sub(\"[\\u4E00-\\u9FFF]+\", \"\\u6F22\", x) # convert CJK unified ideographs to 漢\r\n sent = unicodeToAscii(sentence.lower().strip())\r\n sent = re.sub(r\"([.!?,])\", r\" \\1\", sent)\r\n sent = re.sub(r\"[^a-zA-Z.!?,]+\", r\" \", sent)\r\n sent = re.sub(r\"\\s+\", \" \", sent)\r\n sent = re.sub(\"^ | $\", \"\", sent)\r\n\r\n words = sent.split(' ')\r\n ret = []\r\n for word in words:\r\n ret.append(normalize_word(word))\r\n return ' '.join(ret)\r\n\r\n\r\ndef normalize_string(sentence):\r\n \"\"\"\r\n :param str sentence:\r\n :return: normalized sentence, separated by space\r\n :rtype str\r\n \"\"\"\r\n # x = re.sub(\"[^ a-zA-Z0-9\\uAC00-\\uD7A3]+\", \" \", x)\r\n # x = re.sub(\"[\\u3040-\\u30FF]+\", \"\\u3042\", x) # convert Hiragana and Katakana to あ\r\n # x = re.sub(\"[\\u4E00-\\u9FFF]+\", \"\\u6F22\", x) # convert CJK unified ideographs to 漢\r\n sentence = re.sub(r\"([\\.!?,\\\";\\(\\)])\\'\", r\" \\1\", sentence)\r\n # sent = re.sub(r\"[^a-zA-Z.!?,]+\", r\" \", sent)\r\n sentence = re.sub(r\"\\s+\", \" \", sentence)\r\n sentence = re.sub(\"^ | $\", \"\", sentence)\r\n\r\n words = sentence.split(' ')\r\n ret = []\r\n for word in words:\r\n ret.append(normalize_word(word))\r\n return ' '.join(ret)\r\n\r\n\r\ndef normalize_word(word):\r\n punctuations = [',', '.', '-', '\"', ':', '!', '(', ')', '...', '?']\r\n if word in ',.!?':\r\n return word\r\n elif word in punctuations:\r\n return '<punc>'\r\n elif any('0' <= c <= '9' for c in word):\r\n return '<non-word>'\r\n else:\r\n return word.lower()\r\n\r\n\r\ndef normalize_none(s):\r\n return s\r\n\r\n\r\ndef nltk_tokenize(s):\r\n return nltk.word_tokenize(s)\r\n\r\n\r\nclass Tokenizer:\r\n def __init__(self, normalize_fn=None, tokenize_fn=None):\r\n self.normalize_fn = normalize_fn\r\n self.tokenize_fn = tokenize_fn\r\n\r\n def process(self, s):\r\n s = self.normalize_fn(s)\r\n s = self.tokenize_fn(s)\r\n return s\r\n\r\n\r\nspacy_nlp = None\r\n\r\n\r\ndef spacy_tokenize(s):\r\n import spacy\r\n from spacy.symbols import ORTH\r\n global spacy_nlp\r\n if spacy_nlp is None:\r\n # sputnik.install('spacy', spacy.about.__version__, 'en_default', data_path=ModuleConfigs.get_tmp_path())\r\n spacy_nlp 
= spacy.load('en_core_web_sm')  # `via=` was a spaCy 1.x argument and is not accepted by current spacy.load\r\n        spacy_nlp.tokenizer.add_special_case('<eos>', [{ORTH: '<eos>'}])\r\n        spacy_nlp.tokenizer.add_special_case('<bos>', [{ORTH: '<bos>'}])\r\n        spacy_nlp.tokenizer.add_special_case('<unk>', [{ORTH: '<unk>'}])\r\n    return [_s.text for _s in spacy_nlp.tokenizer(s)]\r\n\r\n\r\ndef normalize_char(char):\r\n    return char.lower().replace(' ', '_')\r\n\r\n\r\ndef space_tokenize(s):\r\n    return s.split(' ')\r\n\r\n\r\ndef char_tokenize(s: str):\r\n    s = s.replace(\" \", \"_\")\r\n    return list(s)\r\n\r\n\r\ndef mecab_tokenize(s):\r\n    import MeCab\r\n    wakati = MeCab.Tagger(\"-Owakati\")\r\n    return wakati.parse(s).split()\r\n\r\n\r\ndef write_vocab(\r\n        text: Union[str, List[str], List[List[str]]],\r\n        output_path: str,\r\n        tokenizer: Tokenizer = None,\r\n        min_freq=0,\r\n        specials=None):\r\n    \"\"\"\r\n\r\n    :param text: text or list of sentences\r\n    :param output_path:\r\n    :param tokenizer: if tokenizer is None, tokens are separated by space\r\n    :param min_freq:\r\n    :param specials:\r\n    :return:\r\n    \"\"\"\r\n    if tokenizer is None:\r\n        tokenizer = Tokenizer(normalize_none, space_tokenize)\r\n    if specials is None:\r\n        specials = ['<pad>', '<sos>', '<eos>', '<oov>']\r\n    word_freqs = {}\r\n\r\n    if isinstance(text, str):\r\n        text = [text]\r\n\r\n    for sent in text:\r\n        if isinstance(sent, str):\r\n            # if normalize_fn is not None:\r\n            #     s = normalize_fn(sent.replace('_', ' '))\r\n            # else:\r\n            #     s = sent\r\n            # ls = char_tokenize(s) if token == 'char' else space_tokenize(s)\r\n            sent = tokenizer.process(sent)\r\n\r\n        for word in sent:\r\n            if word.strip() == '':\r\n                continue\r\n            if word in word_freqs:\r\n                word_freqs[word] += 1\r\n            else:\r\n                word_freqs[word] = 1\r\n\r\n    words = list([word for word in word_freqs if word_freqs[word] > min_freq])\r\n    words.sort(key=lambda word: word_freqs[word], reverse=True)\r\n    with open(output_path, \"w\", encoding='utf-8') as fo:\r\n        fo.write('\\n'.join(specials) + '\\n')\r\n        fo.write(\"\\n\".join(words))\r\n\r\n    logger.info(\"Vocab written to %s (%d tokens)\", output_path, len(specials) + len(words))\r\n\r\n\r\ndef get_token_id(vocab, word):\r\n    \"\"\"\r\n    :type vocab: Vocab\r\n    :type word: str\r\n    :rtype: int\r\n    \"\"\"\r\n    if word in vocab:\r\n        return vocab[word]\r\n    else:\r\n        if '<oov>' in vocab:\r\n            return vocab['<oov>']\r\n        elif '<unk>' in vocab:\r\n            return vocab['<unk>']\r\n        else:\r\n            raise Exception(\"No out-of-vocabulary token found.\")\r\n\r\n\r\nclass Vocab:\r\n    def __init__(self, index2token: List[str] = None, token2index: Dict[str, int] = None):\r\n        if index2token is None:\r\n            self._token2index = {}\r\n            self._index2token = []\r\n        else:\r\n            self._index2token = index2token\r\n            if token2index:\r\n                self._token2index = token2index\r\n            else:\r\n                self._token2index = {token: idx for idx, token in enumerate(index2token)}\r\n        self.embeddings = None\r\n        self.embedding_dim = None\r\n\r\n    @classmethod\r\n    def from_file(cls, file_name):\r\n        index2token = []\r\n        fo = open(file_name, encoding='utf-8')\r\n        for line in fo:\r\n            line = line.strip()\r\n            if line == \"\":\r\n                continue\r\n            index2token.append(line)\r\n        fo.close()\r\n        return cls(index2token)\r\n\r\n    def __getitem__(self, token: str) -> int:\r\n        return self._token2index[token] if token in self._token2index else self.oov_token_idx\r\n\r\n    def tolist(self) -> List[str]:\r\n        return self._index2token\r\n\r\n    def get_token_id(self, token):\r\n        return self[token] or self.oov_token_idx\r\n\r\n    def add_token(self, token: str):\r\n        if token not in self._token2index:\r\n            
self._token2index[token] = len(self._token2index)\r\n            self._index2token.append(token)\r\n\r\n    def __len__(self):\r\n        return len(self._token2index)\r\n\r\n    def get_token(self, idx: int) -> str:\r\n        return self._index2token[idx]\r\n\r\n    def decode_idx_list(self, ls: List[int], ignore: List[int] = None, stop_at: int = None) -> List[str]:\r\n        ret = []\r\n        for idx in ls:\r\n            if stop_at and idx == stop_at:\r\n                break\r\n            elif ignore and idx in ignore:\r\n                continue\r\n            else:\r\n                ret.append(self.get_token(idx))\r\n        return ret\r\n\r\n    def encode_token_list(self, ls: List[str]) -> List[int]:\r\n        return [self.get_token_id(token) for token in ls]\r\n\r\n    @property\r\n    def sos_token_idx(self) -> int:\r\n        idx = self['<sos>'] or self['<s>']\r\n        assert idx is not None\r\n        return idx\r\n\r\n    @property\r\n    def eos_token_idx(self) -> int:\r\n        idx = self['<eos>'] or self['</s>']\r\n        assert idx is not None\r\n        return idx\r\n\r\n    @property\r\n    def blank_token_idx(self):\r\n        idx = self['<blank>'] or self['<pad>']\r\n        assert idx is not None\r\n        return idx\r\n\r\n    @property\r\n    def oov_token_idx(self) -> int:\r\n        if '<oov>' in self._token2index:\r\n            return self._token2index['<oov>']\r\n        elif '<unk>' in self._token2index:\r\n            return self._token2index['<unk>']\r\n        else:\r\n            raise Exception(\"<oov> token not found.\")\r\n\r\n    def get_specials(self):\r\n        return [token for token in self._index2token if token.startswith('<')]\r\n\r\n    def init_pretrained_embeddings(\r\n            self,\r\n            pretrained: str,\r\n            emb_name: str = None,\r\n            dim: int = None) -> np.ndarray:\r\n        if pretrained == 'glove':\r\n            from torchtext.vocab import GloVe\r\n            dim = dim or 300\r\n            vocab = GloVe(\r\n                name=emb_name or '840B', dim=dim,\r\n                cache=os.path.join(ModuleConfigs.get_tmp_path(), \"torchtext\"))\r\n        elif pretrained == 'fasttext':\r\n            from torchtext.vocab import FastText\r\n            vocab = FastText()\r\n        else:\r\n            raise ValueError(\"Pre-trained embeddings not found.\")\r\n\r\n        vectors = vocab.vectors\r\n        oovs = []\r\n\r\n        embeddings = np.zeros([len(self), dim])\r\n        for idx, t in enumerate(self._index2token):\r\n            _t = t.lower()\r\n            if _t in vocab.stoi:\r\n                embeddings[idx, :] = vectors[vocab.stoi[_t]].cpu().numpy()\r\n            if all(token in vocab.stoi for token in _t.split(' ')):\r\n                # axis=0 sums the word vectors element-wise; without it np.sum collapses to a scalar\r\n                embeddings[idx, :] = np.sum([vectors[vocab.stoi[token]].cpu().numpy() for token in _t.split(' ')], axis=0)\r\n            else:\r\n                oovs.append(_t)\r\n\r\n        if oovs:\r\n            logger.warning(f\"{len(oovs)} tokens not found in pre-trained embeddings: {', '.join(oovs)}\")\r\n\r\n        logger.debug(f\"Load embeddings: {pretrained} (no. 
embeddings: {len(self) - len(oovs):,})\")\r\n\r\n        self.embedding_dim = dim\r\n        self.embeddings = embeddings\r\n\r\n    def get_token_embedding(self, token: str) -> np.ndarray:\r\n        if self.embeddings is None:\r\n            raise ValueError('Embeddings are not initialized')\r\n        return self.embeddings[self.get_token_id(token)]\r\n\r\n    def embed_token_list(self, ls):\r\n        emb = np.zeros(self.embedding_dim)\r\n        for token in ls:\r\n            emb += self.get_token_embedding(token)\r\n        return emb\r\n\r\n\r\ndef load_embeddings(\r\n        pretrained: str,\r\n        emb_name: str = None,\r\n        dim: int = None,\r\n        vocab_size: int = None,\r\n        tokens: List[str] = None,\r\n        specials: List[str] = None) -> Tuple[np.ndarray, Vocab]:\r\n    \"\"\"\r\n    Load pre-trained embedding defined in dataset.embeddings\r\n    :param tokens: if specified, only load embeddings of these tokens\r\n    :param specials: special tokens\r\n    :return:\r\n    \"\"\"\r\n    if not pretrained:\r\n        assert dim is not None\r\n        assert vocab_size is not None\r\n        return np.random.rand(vocab_size, dim), None\r\n    elif pretrained.lower() in [\"glove\", \"fasttext\"]:\r\n        if pretrained.lower() == 'glove':\r\n            from torchtext.vocab import GloVe\r\n            vocab = GloVe(\r\n                name=emb_name, dim=dim,\r\n                cache=os.path.join(ModuleConfigs.get_tmp_path(), \"torchtext\"))\r\n        elif pretrained.lower() == 'fasttext':\r\n            from torchtext.vocab import FastText\r\n            vocab = FastText()\r\n        else:\r\n            raise ValueError(\"Pre-trained embeddings not found.\")\r\n\r\n        vectors = vocab.vectors\r\n        index2token = vocab.itos\r\n        token2index = None\r\n        if tokens:  # limit vocabulary to list of tokens\r\n            num_oovs = 0\r\n            keep = []\r\n            index2token = []\r\n            token2index = {}\r\n            for t in tokens:\r\n                _t = t.lower()\r\n                if _t in token2index:\r\n                    if t not in token2index:\r\n                        token2index[t] = token2index[_t]\r\n                elif _t in vocab.stoi:\r\n                    keep.append(vocab.stoi[_t])\r\n                    token2index[_t] = len(index2token)\r\n                    token2index[t] = len(index2token)\r\n                    index2token.append(_t)\r\n                else:\r\n                    num_oovs += 1\r\n            vectors = vectors[keep]\r\n            if num_oovs:\r\n                logger.warning(f\"{num_oovs} tokens not found in pre-trained embeddings\")\r\n\r\n        logger.debug(f\"Load embeddings: {pretrained} (no. embeddings: {len(index2token):,})\")\r\n\r\n        if specials is not None:\r\n            import torch  # local import, consistent with the lazy torchtext imports above\r\n            for s in specials:\r\n                token2index[s] = len(index2token)\r\n                index2token.append(s)\r\n            vectors = torch.cat([vectors, torch.rand(len(specials), len(vectors[0]))])\r\n\r\n        # return nn.Embedding.from_pretrained(vectors, freeze=emb.freeze or True), Vocab(index2token, token2index)\r\n        return vectors, Vocab(index2token, token2index)\r\n    else:\r\n        raise ValueError(f\"{pretrained} is not supported.\")\r\n"
] | [
[
"numpy.random.rand",
"numpy.zeros"
]
] |
annaproxy/udify-metalearning | [
"55206a3aac0aba74a3615a36192d03b6467cfd6f"
] | [
"allennlp/tests/data/fields/sequence_label_field_test.py"
] | [
"# pylint: disable=no-self-use,invalid-name\nfrom collections import defaultdict\n\nimport pytest\nimport numpy\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data import Token, Vocabulary\nfrom allennlp.data.fields import TextField, SequenceLabelField\nfrom allennlp.data.token_indexers import SingleIdTokenIndexer\n\n\nclass TestSequenceLabelField(AllenNlpTestCase):\n def setUp(self):\n super(TestSequenceLabelField, self).setUp()\n self.text = TextField([Token(t) for t in [\"here\", \"are\", \"some\", \"words\", \".\"]],\n {\"words\": SingleIdTokenIndexer(\"words\")})\n\n def test_tag_length_mismatch_raises(self):\n with pytest.raises(ConfigurationError):\n wrong_tags = [\"B\", \"O\", \"O\"]\n _ = SequenceLabelField(wrong_tags, self.text)\n\n def test_count_vocab_items_correctly_indexes_tags(self):\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=\"labels\")\n\n counter = defaultdict(lambda: defaultdict(int))\n sequence_label_field.count_vocab_items(counter)\n\n assert counter[\"labels\"][\"B\"] == 1\n assert counter[\"labels\"][\"I\"] == 1\n assert counter[\"labels\"][\"O\"] == 3\n assert set(counter.keys()) == {\"labels\"}\n\n def test_index_converts_field_correctly(self):\n vocab = Vocabulary()\n b_index = vocab.add_token_to_namespace(\"B\", namespace='*labels')\n i_index = vocab.add_token_to_namespace(\"I\", namespace='*labels')\n o_index = vocab.add_token_to_namespace(\"O\", namespace='*labels')\n\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=\"*labels\")\n sequence_label_field.index(vocab)\n\n # pylint: disable=protected-access\n assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]\n # pylint: enable=protected-access\n\n def test_as_tensor_produces_integer_targets(self):\n vocab = Vocabulary()\n vocab.add_token_to_namespace(\"B\", namespace='*labels')\n vocab.add_token_to_namespace(\"I\", namespace='*labels')\n vocab.add_token_to_namespace(\"O\", namespace='*labels')\n\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=\"*labels\")\n sequence_label_field.index(vocab)\n padding_lengths = sequence_label_field.get_padding_lengths()\n tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()\n numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))\n\n def test_sequence_label_field_raises_on_incorrect_type(self):\n\n with pytest.raises(ConfigurationError):\n _ = SequenceLabelField([[], [], [], [], []], self.text)\n\n def test_class_variables_for_namespace_warnings_work_correctly(self):\n # pylint: disable=protected-access\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n assert \"text\" not in SequenceLabelField._already_warned_namespaces\n with self.assertLogs(logger=\"allennlp.data.fields.sequence_label_field\", level=\"WARNING\"):\n _ = SequenceLabelField(tags, self.text, label_namespace=\"text\")\n\n # We've warned once, so we should have set the class variable to False.\n assert \"text\" in SequenceLabelField._already_warned_namespaces\n with pytest.raises(AssertionError):\n with self.assertLogs(logger=\"allennlp.data.fields.sequence_label_field\", level=\"WARNING\"):\n _ = SequenceLabelField(tags, self.text, label_namespace=\"text\")\n\n # ... 
but a new namespace should still log a warning.\n assert \"text2\" not in SequenceLabelField._already_warned_namespaces\n with self.assertLogs(logger=\"allennlp.data.fields.sequence_label_field\", level=\"WARNING\"):\n _ = SequenceLabelField(tags, self.text, label_namespace=\"text2\")\n\n def test_printing_doesnt_crash(self):\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=\"labels\")\n print(sequence_label_field)\n\n def test_sequence_methods(self):\n tags = [\"B\", \"I\", \"O\", \"O\", \"O\"]\n sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=\"labels\")\n\n assert len(sequence_label_field) == 5\n assert sequence_label_field[1] == \"I\"\n assert [label for label in sequence_label_field] == tags\n"
] | [
[
"numpy.array"
]
] |
andrewliao11/Andrew_tensorpack | [
"735a2672e3d93b5b612a303b5b6d222e9b2d4280"
] | [
"tensorpack/dataflow/dataset/ilsvrc.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: ilsvrc.py\n# Author: Yuxin Wu <[email protected]>\nimport os\nimport tarfile\nimport cv2\nimport numpy as np\nfrom six.moves import range\nimport xml.etree.ElementTree as ET\n\nfrom ...utils import logger, get_rng, get_dataset_path\nfrom ...utils.loadcaffe import get_caffe_pb\nfrom ...utils.fs import mkdir_p, download\nfrom ...utils.timer import timed_operation\nfrom ..base import RNGDataFlow\n\n__all__ = ['ILSVRCMeta', 'ILSVRC12']\n\nCAFFE_ILSVRC12_URL = \"http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz\"\n\nclass ILSVRCMeta(object):\n \"\"\"\n Some metadata for ILSVRC dataset.\n \"\"\"\n def __init__(self, dir=None):\n if dir is None:\n dir = get_dataset_path('ilsvrc_metadata')\n self.dir = dir\n mkdir_p(self.dir)\n self.caffepb = get_caffe_pb()\n f = os.path.join(self.dir, 'synsets.txt')\n if not os.path.isfile(f):\n self._download_caffe_meta()\n\n def get_synset_words_1000(self):\n \"\"\"\n :returns a dict of {cls_number: cls_name}\n \"\"\"\n fname = os.path.join(self.dir, 'synset_words.txt')\n assert os.path.isfile(fname)\n lines = [x.strip() for x in open(fname).readlines()]\n return dict(enumerate(lines))\n\n def get_synset_1000(self):\n \"\"\"\n :returns a dict of {cls_number: synset_id}\n \"\"\"\n fname = os.path.join(self.dir, 'synsets.txt')\n assert os.path.isfile(fname)\n lines = [x.strip() for x in open(fname).readlines()]\n return dict(enumerate(lines))\n\n def _download_caffe_meta(self):\n fpath = download(CAFFE_ILSVRC12_URL, self.dir)\n tarfile.open(fpath, 'r:gz').extractall(self.dir)\n\n def get_image_list(self, name):\n \"\"\"\n :param name: 'train' or 'val' or 'test'\n :returns: list of (image filename, cls)\n \"\"\"\n assert name in ['train', 'val', 'test']\n fname = os.path.join(self.dir, name + '.txt')\n assert os.path.isfile(fname)\n with open(fname) as f:\n ret = []\n for line in f.readlines():\n name, cls = line.strip().split()\n ret.append((name, int(cls)))\n assert len(ret)\n return ret\n\n def get_per_pixel_mean(self, size=None):\n \"\"\"\n :param size: return image size in [h, w]. default to (256, 256)\n :returns: per-pixel mean as an array of shape (h, w, 3) in range [0, 255]\n \"\"\"\n obj = self.caffepb.BlobProto()\n\n mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')\n with open(mean_file, 'rb') as f:\n obj.ParseFromString(f.read())\n arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')\n arr = np.transpose(arr, [1,2,0])\n if size is not None:\n arr = cv2.resize(arr, size[::-1])\n return arr\n\nclass ILSVRC12(RNGDataFlow):\n def __init__(self, dir, name, meta_dir=None, shuffle=True,\n dir_structure='original', include_bb=False):\n \"\"\"\n :param dir: A directory containing a subdir named `name`, where the\n original ILSVRC12_`name`.tar gets decompressed.\n :param name: 'train' or 'val' or 'test'\n :param dir_structure: The dir structure of 'val' and 'test'.\n If is 'original' then keep the original decompressed directory with list\n of image files (as below). If set to 'train', use the the same\n directory structure as 'train/', with class name as subdirectories.\n :param include_bb: Include the bounding box. Maybe useful in training.\n\n When `dir_structure=='original'`, `dir` should have the following structure:\n\n .. 
code-block:: none\n\n            dir/\n              train/\n                n02134418/\n                  n02134418_198.JPEG\n                  ...\n                ...\n              val/\n                ILSVRC2012_val_00000001.JPEG\n                ...\n              test/\n                ILSVRC2012_test_00000001.JPEG\n                ...\n              bbox/\n                n02134418/\n                  n02134418_198.xml\n                  ...\n                ...\n\n        After decompressing ILSVRC12_img_train.tar, you can use the following\n        command to build the above structure for `train/`:\n\n        .. code-block:: none\n\n            tar xvf ILSVRC12_img_train.tar -C train && cd train\n            find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'\n            Or:\n            for i in *.tar; do dir=${i%.tar}; echo $dir; mkdir -p $dir; tar xf $i -C $dir; done\n\n        \"\"\"\n        assert name in ['train', 'test', 'val']\n        self.full_dir = os.path.join(dir, name)\n        self.name = name\n        assert os.path.isdir(self.full_dir), self.full_dir\n        self.shuffle = shuffle\n        meta = ILSVRCMeta(meta_dir)\n        self.imglist = meta.get_image_list(name)\n        self.dir_structure = dir_structure\n        self.synset = meta.get_synset_1000()\n\n        if include_bb:\n            bbdir = os.path.join(dir, 'bbox') if not \\\n                    isinstance(include_bb, six.string_types) else include_bb\n            assert name == 'train', 'Bounding box only available for training'\n            self.bblist = ILSVRC12.get_training_bbox(bbdir, self.imglist)\n        self.include_bb = include_bb\n\n    def size(self):\n        return len(self.imglist)\n\n    def get_data(self):\n        \"\"\"\n        Produce original images of shape [h, w, 3(BGR)], and label,\n        and optionally a bbox of [xmin, ymin, xmax, ymax]\n        \"\"\"\n        idxs = np.arange(len(self.imglist))\n        add_label_to_fname = (self.name != 'train' and self.dir_structure != 'original')\n        if self.shuffle:\n            self.rng.shuffle(idxs)\n        for k in idxs:\n            fname, label = self.imglist[k]\n            if add_label_to_fname:\n                fname = os.path.join(self.full_dir, self.synset[label], fname)\n            else:\n                fname = os.path.join(self.full_dir, fname)\n            im = cv2.imread(fname.strip(), cv2.IMREAD_COLOR)\n            assert im is not None, fname\n            if im.ndim == 2:\n                im = np.expand_dims(im, 2).repeat(3,2)\n            if self.include_bb:\n                bb = self.bblist[k]\n                if bb is None:\n                    bb = [0, 0, im.shape[1]-1, im.shape[0]-1]\n                yield [im, label, bb]\n            else:\n                yield [im, label]\n\n    @staticmethod\n    def get_training_bbox(bbox_dir, imglist):\n        ret = []\n\n        def parse_bbox(fname):\n            root = ET.parse(fname).getroot()\n            size = root.find('size').getchildren()\n            # wrap map() in list() so the values are indexable under Python 3\n            size = list(map(int, [size[0].text, size[1].text]))\n\n            box = root.find('object').find('bndbox').getchildren()\n            box = list(map(lambda x: float(x.text), box))\n            #box[0] /= size[0]\n            #box[1] /= size[1]\n            #box[2] /= size[0]\n            #box[3] /= size[1]\n            return np.asarray(box, dtype='float32')\n\n        with timed_operation('Loading Bounding Boxes ...'):\n            cnt = 0\n            import tqdm\n            for k in tqdm.trange(len(imglist)):\n                fname = imglist[k][0]\n                fname = fname[:-4] + 'xml'\n                fname = os.path.join(bbox_dir, fname)\n                try:\n                    ret.append(parse_bbox(fname))\n                    cnt += 1\n                except KeyboardInterrupt:\n                    raise\n                except:\n                    ret.append(None)\n            logger.info(\"{}/{} images have bounding box.\".format(cnt, len(imglist)))\n        return ret\n\nif __name__ == '__main__':\n    meta = ILSVRCMeta()\n    #print(meta.get_synset_words_1000())\n\n    ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', include_bb=True,\n                  shuffle=False)\n    ds.reset_state()\n\n    for k in ds.get_data():\n        from IPython import embed; embed()\n        break\n"
] | [
[
"numpy.array",
"numpy.expand_dims",
"numpy.transpose",
"numpy.asarray"
]
] |
droyston/spectralize | [
"572770e7358acc3ec433470659759c17453409f2"
] | [
"app/clean_test_app.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 18:54:48 2020\n\n@author: dylanroyston\n\"\"\"\n\n\n# -*- coding: utf-8 -*-\n\n# import packages\n#import dash_player\nimport dash\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport psycopg2\nimport os\nimport pandas as pd\nimport numpy as np\nimport plotly\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport librosa\nimport librosa.display as ld\nimport IPython.display as ipd\nimport pylab as pl\nimport boto3\n#import matplotlib as mpl\n#import matplotlib.pyplot as plt\n#from matplotlib import cm\n#from colorspacious import cspace_converter\n#from collections import OrderedDict\n\n######\n\n\n# connect to PSQL and retrieve \npsql_usr = os.environ.get('PSQL_USR')\npsql_pw = os.environ.get('PSQL_PW')\n\nconn = psycopg2.connect(host = 'ec2-13-58-251-142.us-east-2.compute.amazonaws.com',\n dbname = 'spectralize',\n user='postgres',\n password=psql_pw)\n\n\n\n##### read out metadata\nmetadata = conn.cursor()\n\nmetadata.execute(\"SELECT * FROM clean_metadata WHERE false;\")\ncols = set(metadata.fetchall())\n\nmetadata.execute(\"SELECT * FROM clean_metadata;\")\nmd = set(metadata.fetchall())\n\ncols = [\"s3_key\", \"song_id\", \"album\", \"albumartist\", \"artist\", \n \"audio_offset\", \"bitrate\", \"channels\", \"comment\", \"composer\",\n \"disc\", \"disc_total\", \"duration\", \"filesize\", \"genre\",\n \"samplerate\", \"title\", \"track\", \"track_total\", \"year\"]\n\ntag_df = pd.DataFrame(data=md, columns=cols)\n\n\n\n\n##### s3 acess for playing audio files\ns3_bucket = 'mdp-spectralize-pal'\nnumber_of_files = 0\ns3 = boto3.resource('s3')\nbucket = s3.Bucket(s3_bucket)\n \n# placeholders for callback initialization\nstandin_fp = '/home/dylanroyston/Documents/GIT/spectralize/app/hello.wav'\naudio_sd_file = standin_fp\n#audio_rawfile, new_sr = librosa.load(standin_fp, sr=None)\nstandin_data = np.array([[0,0],[0,0]])\nstandin_df = pd.DataFrame(standin_data, columns=['x','y'])\n#audio_fig = px.line(standin_df, x='x', y='y', title='audio data', render_mode='webgl')\nspec_fig = px.imshow(standin_df)\n\ndef load_audio_data(selected_row):\n # read out audio data\n \n #curr_song_id = tag_df.iloc[selected_row]['song_id']\n curr_song_id = selected_row\n \n # audiodata = conn.cursor()\n \n # qstring = 'SELECT intensity FROM clean_audio WHERE song_id=' + str(curr_song_id)\n \n # audiodata.execute(qstring)\n # ad = np.array(audiodata.fetchall())\n \n # audio_df = pd.DataFrame(data=ad, columns=['I'])\n # audio_fig = px.line(audio_df, x=audio_df.index, y='I', title='audio data', render_mode='webgl')\n # audio_fig.update_layout(\n # height=250,\n # margin_r=0,\n # margin_l=0,\n # margin_t=0,\n # yaxis_title='',\n # yaxis_fixedrange=True)\n \n\n s3_key = tag_df.iloc[curr_song_id]['s3_key']\n \n #this_row = tag_df.loc[tag_df['song_id'] == curr_song_id]\n #s3_key = tag_df.iloc[this_row]['s3_key']\n \n ext = s3_key[-4:]\n audio_sd_file = '/home/dylanroyston/Documents/GIT/spectralize/app/audio_file' + ext\n \n bucket.download_file(s3_key, audio_sd_file) \n \n #audio_rawfile = librosa.load(audio_sd_file)\n \n \n \n return audio_sd_file#, audio_fig\n\ndef load_spec_data(selected_row):\n \n curr_song_id = selected_row\n \n specdata = conn.cursor()\n qstring = 'SELECT * FROM clean_spec WHERE song_id=' + str(curr_song_id)\n specdata.execute(qstring)\n sd = np.array(specdata.fetchall())\n \n spec_df = 
pd.DataFrame(data=sd)\n \n #currtitle = tag_df.iloc[curr_song_id]['title']\n #currdur = tag_df.iloc[curr_song_id]['duration'] \n \n\n # numpts = len(sd)\n \n # interval = float(currdur) / numpts\n \n # timeline = np.linspace(0,float(currdur),numpts)\n \n # rt = timeline.round(0)\n\n trim_sd = spec_df.iloc[:,2:]\n spec_fig = px.imshow(trim_sd.transpose(),\n origin='lower',\n #title=currtitle,\n #x=timeline\n )\n spec_fig.update_layout(\n height=250,\n margin_r=0,\n margin_l=0,\n margin_t=0,\n yaxis_title='Frequency',\n xaxis_title='Time',\n #colorbar.title='power',\n yaxis_fixedrange=True,\n #x=str(rt)\n #title=currtitle\n )\n \n return spec_fig\n\n\n#####\n# initialize Dash app \nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n \n # header\n html.H1(children='Metadata'),\n \n # metadata table\n dash_table.DataTable(\n id = 'metadata_table',\n data=tag_df.to_dict('rows'),\n columns=[{'id': c, 'name': c} for c in tag_df.columns],\n style_cell={\n 'overflowX': 'auto',\n 'overflow': 'hidden',\n 'textOverflow': 'ellipsis',\n 'maxWidth': 10,\n 'row_selectable': 'single',\n 'font_family': 'Arial',\n 'font_size': '1.5rem',\n 'padding': '.5rem',\n 'backgroundColor': '#f4f4f2'\n },\n style_cell_conditional=[\n {'textAlign': 'center'}\n ],\n style_header={\n 'backgroundColor':'#f4f4f2',\n 'fontWeight': 'bold',\n 'overflowX': 'auto',\n 'textOverflow': 'ellipsis'\n },\n style_table={\n 'maxHeight':'500px',\n 'overflowX': 'scroll'\n },\n tooltip_data=[\n {\n column: {'value': str(value), 'type': 'markdown'}\n for column, value in row.items()\n } for row in tag_df.to_dict('rows')\n ],\n tooltip_duration=None,\n style_as_list_view=True,\n ),# end table\n \n \n # load audio button\n html.Br(),\n \n html.Div(\n [\n dcc.Input(id='input_songnum', value='input song number', type='number'),\n html.Button('Load audio', \n id='submit-val',\n style={'display': 'inline-block'},\n n_clicks=0),\n html.Div(id='song_input')\n ],\n ),\n \n html.Br(),\n \n \n # html.Audio(id=\"player\", src=audio_sd_file, controls=True, style={\n # \"width\": \"100%\"\n # }), \n # dash_player.DashPlayer(\n # id='player',\n # url='audio_sd_file',\n # controls=True\n # ),\n \n html.Br(),\n \n #dcc.Graph(id='waveform', figure=audio_fig),\n \n html.Br(),\n\n dcc.Graph(id='spect', figure=spec_fig)\n \n])\n##### finish Dash layout\n\n\n\n##### callbacks\n# load-audio button control\n# @app.callback(\n# Output('input_songnum', 'value'),\n# [Input('submit-val', 'n_clicks')]\n# )\n# def retrieve_audio(value):\n# return load_audio_data(value)\n\n\n# @app.callback(\n# Output('waveform', 'figure'),\n# [Input('submit-val', 'n_clicks')]\n# )\n# def update_A_figure(submit_val):\n# audio_fig = load_audio_data(submit_val)\n# return audio_fig\n \n\n## update audio player\n# @app.callback(\n# Output('player', 'src'),\n# [Input('submit-val', 'n_clicks')]\n# )\n# def update_player(submit_val):\n# audio_sd_file = load_audio_data(submit_val)\n# return audio_sd_file\n\n\n\n## update spect figure on button click\[email protected](\n Output('spect', 'figure'),\n [Input('submit-val', 'n_clicks'),\n Input('input_songnum', 'value')]\n )\ndef update_S_figure(n_clicks, value):\n \n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n \n if 'submit-val' in changed_id:\n spec_fig = load_spec_data(value)\n \n return spec_fig\n\n\n\n\n\n## combined audiofile/spec update\n# @app.callback(\n# [Output('player', 'src'),\n# 
Output('spect', 'figure')],\n# [Input('submit-val', 'n_clicks')]\n# )\n# def update_figures(submit_val):\n# audio_sd_file = load_audio_data(submit_val)\n# spec_fig = load_spec_data(submit_val)\n# return audio_sd_file, spec_fig\n \n\n# @app.callback(\n# Output('metadata_table', 'derived_virtual_selected_rows'),\n# [Input('submit-val', 'n_clicks'),\n# State('metadata_table', 'derived_virtual_selected_rows')]\n# )\n# def update_audio(n_clicks, derived_virtual_selected_rows):\n# if derived_virtual_selected_rows is None:\n# derived_virtual_selected_rows = []\n \n \n# return load_audio_data(derived_virtual_selected_rows)\n\n\nif __name__ == '__main__':\n #app.run_server(debug=True, port=8050, host='127.0.0.1')\n app.run_server(debug=True, port=8050, host='127.0.0.1')\n #app.run_server(debug=True, port=80, host='ec2-18-224-114-72.us-east-2.compute.amazonaws.com')\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] |
SoftwareUnderstanding/inspect4py | [
"9c4d7252535082ad938b26baf281d93f3a27285e"
] | [
"test/test_files/pylops/examples/plot_imag.py"
] | [
"\"\"\"\nImag\n====\n\nThis example shows how to use the :py:class:`pylops.basicoperators.Imag`\noperator.\nThis operator returns the imaginary part of the data as a real value in\nforward mode, and the real part of the model as an imaginary value in\nadjoint mode (with zero real part).\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as pltgs\n\nimport pylops\n\nplt.close('all')\n\n###############################################################################\n# Let's define a Imag operator :math:`\\mathbf{\\Im}` to extract the imaginary\n# component of the input.\n\nM = 5\nx = np.arange(M) + 1j * np.arange(M)[::-1]\nRop = pylops.basicoperators.Imag(M, dtype='complex128')\n\ny = Rop*x\nxadj = Rop.H*y\n\n_, axs = plt.subplots(1, 3, figsize=(10, 4))\naxs[0].plot(np.real(x), lw=2, label='Real')\naxs[0].plot(np.imag(x), lw=2, label='Imag')\naxs[0].legend()\naxs[0].set_title('Input')\naxs[1].plot(np.real(y), lw=2, label='Real')\naxs[1].plot(np.imag(y), lw=2, label='Imag')\naxs[1].legend()\naxs[1].set_title('Forward of Input')\naxs[2].plot(np.real(xadj), lw=2, label='Real')\naxs[2].plot(np.imag(xadj), lw=2, label='Imag')\naxs[2].legend()\naxs[2].set_title('Adjoint of Forward')\n"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.real",
"numpy.imag"
]
] |
shubhamkumaR630/datasets | [
"fe9ee91849cefed0953141ea3588f73b7def78fd",
"fe9ee91849cefed0953141ea3588f73b7def78fd",
"fe9ee91849cefed0953141ea3588f73b7def78fd"
] | [
"tensorflow_datasets/summarization/summscreen/summscreen.py",
"tensorflow_datasets/audio/fuss.py",
"tensorflow_datasets/object_detection/kitti.py"
] | [
"# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"SummScreen Summarization dataset, non-anonymized, non-tokenized version.\"\"\"\n\nimport json\nimport os\n\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n_DESCRIPTION = \"\"\"\nSummScreen Summarization dataset, non-anonymized, non-tokenized version.\n\nTrain/val/test splits and filtering are based on the final tokenized dataset,\nbut transcripts and recaps provided are based on the untokenized text.\n\nThere are two features:\n\n - transcript: Full episode transcripts, each line of dialogue\n separated by newlines\n - recap: Recaps or summaries of episodes\n\"\"\"\n\n_CITATION = \"\"\"\\\n@article{DBLP:journals/corr/abs-2104-07091,\n author = {Mingda Chen and\n Zewei Chu and\n Sam Wiseman and\n Kevin Gimpel},\n title = {SummScreen: {A} Dataset for Abstractive Screenplay Summarization},\n journal = {CoRR},\n volume = {abs/2104.07091},\n year = {2021},\n url = {https://arxiv.org/abs/2104.07091},\n archivePrefix = {arXiv},\n eprint = {2104.07091},\n timestamp = {Mon, 19 Apr 2021 16:45:47 +0200},\n biburl = {https://dblp.org/rec/journals/corr/abs-2104-07091.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\"\"\"\n\n_DL_URLS = {\n # pylint: disable=line-too-long\n 'tokenized':\n 'https://drive.google.com/uc?export=download&id=1BvdIllGBo9d2-bzXQRzWuJXB04XPVmfF',\n 'untokenized':\n 'https://drive.google.com/uc?export=download&id=1tFpt32USOO2i1FWhtFTsyYyFzuRm2k36',\n # pylint: enable=line-too-long\n}\n\n_RECAP = 'recap'\n_TRANSCRIPT = 'transcript'\n_RECAP_SOURCE_FULL_NAMES = {\n 'fd': 'ForeverDreaming',\n 'tms': 'TVMegaSite',\n}\n_SPLITS = ['train', 'dev', 'test']\n\n\ndef _load_file(path):\n with tf.io.gfile.GFile(path, 'r') as f:\n return f.read()\n\n\ndef _load_json(path):\n return json.loads(_load_file(path))\n\n\ndef _load_jsonl(path):\n return [json.loads(line) for line in _load_file(path).strip().splitlines()]\n\n\ndef _get_filenames_dict(tokenized_path, recap_source: str):\n \"\"\"Get dictionary of filenames for each split.\"\"\"\n filenames_dict = {}\n for split in _SPLITS:\n tokenized_data = _load_jsonl(\n os.path.join(tokenized_path, 'SummScreen',\n _RECAP_SOURCE_FULL_NAMES[recap_source],\n f'{recap_source}_{split}.json'))\n filenames_dict[split] = [row['filename'] for row in tokenized_data]\n return filenames_dict\n\n\ndef _get_paths_dict(untokenized_path, recap_source, filenames_dict):\n \"\"\"Get dictionary of example paths for each split.\"\"\"\n paths_dict = {}\n for split, filenames in filenames_dict.items():\n paths_dict[split] = [\n os.path.join(untokenized_path, 'SummScreen_raw', recap_source, filename)\n for filename in filenames\n ]\n return paths_dict\n\n\nclass SummscreenConfig(tfds.core.BuilderConfig):\n \"\"\"BuilderConfig for Summscreen.\"\"\"\n\n def __init__(self, *, recap_source=None, **kwargs):\n \"\"\"BuilderConfig for Summscreen.\n\n Args:\n recap_source: str. 
The directory for the source of recaps to read.\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n super(SummscreenConfig, self).__init__(**kwargs)\n self.recap_source = recap_source\n\n\nclass Summscreen(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for non-tokenized, non-anonymized SummScreen dataset.\"\"\"\n\n VERSION = tfds.core.Version('1.0.0')\n RELEASE_NOTES = {\n '1.0.0': 'Initial release.',\n }\n\n BUILDER_CONFIGS = [\n SummscreenConfig(\n name='fd',\n description='ForeverDreaming',\n recap_source='fd',\n ),\n SummscreenConfig(\n name='tms',\n description='TVMegaSite',\n recap_source='tms',\n ),\n ]\n\n def _info(self):\n # Should return a tfds.core.DatasetInfo object\n if self._builder_config.recap_source == 'fd':\n features = tfds.features.FeaturesDict({\n _TRANSCRIPT: tfds.features.Text(),\n _RECAP: tfds.features.Text(),\n 'episode_number': tfds.features.Text(),\n 'episode_title': tfds.features.Text(),\n 'show_title': tfds.features.Text(),\n 'transcript_author': tfds.features.Text(),\n })\n elif self._builder_config.recap_source == 'tms':\n features = tfds.features.FeaturesDict({\n _TRANSCRIPT:\n tfds.features.Text(),\n _RECAP:\n tfds.features.Text(),\n 'episode_summary':\n tfds.features.Text(),\n 'show_title':\n tfds.features.Text(),\n 'transcript_author':\n tfds.features.Tensor(shape=(None,), dtype=tf.string),\n 'recap_author':\n tfds.features.Text(),\n })\n else:\n raise KeyError(\n f'Unknown recap_source {self._builder_config.recap_source}')\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=(_TRANSCRIPT, _RECAP),\n homepage='https://github.com/mingdachen/SummScreen',\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n dl_paths = dl_manager.download_and_extract(_DL_URLS)\n filenames_dict = _get_filenames_dict(\n tokenized_path=dl_paths['tokenized'],\n recap_source=self._builder_config.recap_source,\n )\n paths_dict = _get_paths_dict(\n untokenized_path=dl_paths['untokenized'],\n recap_source=self._builder_config.recap_source,\n filenames_dict=filenames_dict,\n )\n return {\n 'train': self._generate_examples(paths=paths_dict['train']),\n 'validation': self._generate_examples(paths=paths_dict['dev']),\n 'test': self._generate_examples(paths=paths_dict['test']),\n }\n\n def _generate_examples(self, paths):\n for path in paths:\n example = _load_json(path)\n fname = os.path.basename(path)\n if self._builder_config.recap_source == 'fd':\n yield fname, {\n _TRANSCRIPT: '\\n'.join(example['Transcript']),\n _RECAP: '\\n'.join(example['Recap']),\n 'episode_number': example['Episode Number'],\n 'episode_title': example['Episode Title'],\n 'show_title': example['Show Title'],\n 'transcript_author': example['Transcript Author'],\n }\n elif self._builder_config.recap_source == 'tms':\n yield fname, {\n _TRANSCRIPT: '\\n'.join(example['Transcript']),\n _RECAP: '\\n'.join(example['Recap']),\n 'episode_summary': '\\n'.join(example['Episode Summary']),\n 'show_title': example['Show Title'],\n 'transcript_author': example['Transcript Author'],\n 'recap_author': example['Recap Author'],\n }\n else:\n raise KeyError(\n f'Unknown recap_source {self._builder_config.recap_source}')\n",
"# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"FUSS dataset.\"\"\"\n\nimport os\nfrom absl import logging\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = r\"\"\"\\\n@inproceedings{wisdom2020fuss,\n title = {What's All the {FUSS} About Free Universal Sound Separation Data?},\n author = {Scott Wisdom and Hakan Erdogan and Daniel P. W. Ellis and Romain Serizel and Nicolas Turpault and Eduardo Fonseca and Justin Salamon and Prem Seetharaman and John R. Hershey},\n year = {2020},\n url = {https://arxiv.org/abs/2011.00803},\n}\n\n@inproceedings{fonseca2020fsd50k,\n author = {Eduardo Fonseca and Xavier Favory and Jordi Pons and Frederic Font Corbera and Xavier Serra},\n title = {{FSD}50k: an open dataset of human-labeled sound events},\n year = {2020},\n url = {https://arxiv.org/abs/2010.00475},\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nThe Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary\nsound mixtures and source-level references, for use in experiments on arbitrary\nsound separation.\n\nThis is the official sound separation data for the DCASE2020 Challenge Task 4:\nSound Event Detection and Separation in Domestic Environments.\n\nOverview: FUSS audio data is sourced from a pre-release of Freesound dataset\nknown as (FSD50k), a sound event dataset composed of Freesound content annotated\nwith labels from the AudioSet Ontology. Using the FSD50K labels, these source\nfiles have been screened such that they likely only contain a single type of\nsound. Labels are not provided for these source files, and are not considered\npart of the challenge. For the purpose of the DCASE Task4 Sound Separation and\nEvent Detection challenge, systems should not use FSD50K labels, even though\nthey may become available upon FSD50K release.\n\nTo create mixtures, 10 second clips of sources are convolved with simulated room\nimpulse responses and added together. Each 10 second mixture contains between\n1 and 4 sources. Source files longer than 10 seconds are considered \"background\"\nsources. Every mixture contains one background source, which is active for the\nentire duration. 
We provide: a software recipe to create the dataset, the room\nimpulse responses, and the original source audio.\n\"\"\"\n\n_URL = \"https://github.com/google-research/sound-separation/blob/master/datasets/fuss/FUSS_license_doc/README.md\"\n_DL_METADATA = {\n \"reverberant\":\n (\"https://zenodo.org/record/3743844/files/FUSS_ssdata_reverb.tar.gz\",\n \"ssdata_reverb\"),\n \"unprocessed\":\n (\"https://zenodo.org/record/3743844/files/FUSS_ssdata.tar.gz\", \"ssdata\"\n ),\n}\n\n\nclass Fuss(tfds.core.GeneratorBasedBuilder):\n \"\"\"FUSS: Free Universal Sound Separation dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n tfds.core.BuilderConfig(\n name=\"reverberant\",\n description=\"Default reverberated audio.\",\n version=tfds.core.Version(\"1.2.0\")),\n tfds.core.BuilderConfig(\n name=\"unprocessed\",\n description=\"Unprocessed audio without additional reverberation.\",\n version=tfds.core.Version(\"1.2.0\")),\n ]\n\n def _info(self):\n source_labels = [\"background0\", \"foreground0\", \"foreground1\", \"foreground2\"]\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"mixture_audio\":\n tfds.features.Audio(\n file_format=\"wav\",\n shape=(160000,),\n sample_rate=16000,\n dtype=tf.int16),\n \"sources\":\n tfds.features.Sequence({\n \"audio\":\n tfds.features.Audio(\n file_format=\"wav\",\n shape=(160000,),\n sample_rate=16000,\n dtype=tf.int16),\n \"label\":\n tfds.features.ClassLabel(names=source_labels),\n }),\n \"segments\":\n tfds.features.Sequence({\n \"start_time_seconds\": tf.float32,\n \"end_time_seconds\": tf.float32,\n \"label\": tf.string\n }),\n \"jams\":\n tf.string,\n \"id\":\n tf.string,\n }),\n supervised_keys=(\"mixture_audio\", \"sources\"),\n homepage=_URL,\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n url, extracted_dirname = _DL_METADATA[self.builder_config.name]\n base_dir = dl_manager.download_and_extract(url)\n splits = []\n for split_name, split_dir in [(tfds.Split.TRAIN, \"train\"),\n (tfds.Split.VALIDATION, \"validation\"),\n (tfds.Split.TEST, \"eval\")]:\n splits.append(\n tfds.core.SplitGenerator(\n name=split_name,\n gen_kwargs={\n \"base_dir\": os.path.join(base_dir, extracted_dirname),\n \"split\": split_dir,\n }))\n return splits\n\n def _parse_segments(self, path):\n segments = []\n if not tf.io.gfile.exists(path):\n # Some segments files are missing in the \"unprocessed\" set.\n logging.info(\"Missing segments file: %s\", path)\n return segments\n with tf.io.gfile.GFile(path) as f:\n for l in f:\n try:\n start, end, label = l.split()\n except ValueError:\n continue\n segments.append({\n \"start_time_seconds\": float(start),\n \"end_time_seconds\": float(end),\n \"label\": label\n })\n return segments\n\n def _generate_examples(self, base_dir, split):\n \"\"\"Generates examples for the given split.\"\"\"\n path = os.path.join(base_dir, \"%s_example_list.txt\" % split)\n split_dir = os.path.join(base_dir, split)\n with tf.io.gfile.GFile(path) as example_list:\n for line in example_list:\n paths = line.split()\n key = _basename_without_ext(paths[0])\n sources = []\n for p in paths[1:]:\n sources.append({\n \"audio\": os.path.join(base_dir, p),\n \"label\": _basename_without_ext(p).split(\"_\")[0],\n })\n segments = self._parse_segments(os.path.join(split_dir, \"%s.txt\" % key))\n jams = tf.io.gfile.GFile(os.path.join(split_dir,\n \"%s.jams\" % key)).read()\n example = {\n \"mixture_audio\": os.path.join(base_dir, paths[0]),\n \"sources\": sources,\n \"segments\": 
segments,\n \"jams\": jams,\n \"id\": key,\n }\n yield key, example\n\n\ndef _basename_without_ext(p):\n basename, _ = os.path.splitext(os.path.basename(p))\n return basename\n",
"# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Kitti dataset.\"\"\"\n\nimport collections\nimport csv\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@inproceedings{Geiger2012CVPR,\n author = {Andreas Geiger and Philip Lenz and Raquel Urtasun},\n title = {Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite},\n booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)},\n year = {2012}\n}\n\"\"\"\n_DESCRIPTION = \"\"\"\\\nKitti contains a suite of vision tasks built using an autonomous driving\nplatform. The full benchmark contains many tasks such as stereo, optical flow,\nvisual odometry, etc. This dataset contains the object detection dataset,\nincluding the monocular images and bounding boxes. The dataset contains 7481\ntraining images annotated with 3D bounding boxes. A full description of the\nannotations can be found in the readme of the object development kit readme on\nthe Kitti homepage.\n\"\"\"\n_HOMEPAGE_URL = \"http://www.cvlibs.net/datasets/kitti/\"\n_DATA_URL = \"https://s3.eu-central-1.amazonaws.com/avg-kitti\"\n_IMAGES_FNAME = \"data_object_image_2.zip\"\n_LABELS_FNAME = \"data_object_label_2.zip\"\n_DEVKIT_FNAME = \"devkit_object.zip\"\n_OBJECT_LABELS = [\n \"Car\",\n \"Van\",\n \"Truck\",\n \"Pedestrian\",\n \"Person_sitting\",\n \"Cyclist\",\n \"Tram\",\n \"Misc\",\n]\n# The percentage of trainset videos to put into validation and test sets.\n# The released test images do not have labels.\n_VALIDATION_SPLIT_PERCENT_VIDEOS = 10\n_TEST_SPLIT_PERCENT_VIDEOS = 10\n\n# Raw Kitti representation of a bounding box. Coordinates are in pixels,\n# measured from the top-left hand corner.\nRawBoundingBox = collections.namedtuple(\"RawBoundingBox\",\n [\"top\", \"bottom\", \"left\", \"right\"])\n\n\nclass Kitti(tfds.core.GeneratorBasedBuilder):\n \"\"\"Kitti dataset.\"\"\"\n\n VERSION = tfds.core.Version(\"3.2.0\")\n SUPPORTED_VERSIONS = [\n tfds.core.Version(\"3.1.0\"),\n ]\n RELEASE_NOTES = {\"3.2.0\": \"Devkit updated.\"}\n\n def _info(self):\n # Annotation descriptions are in the object development kit.\n annotations = {\n \"type\": tfds.features.ClassLabel(names=_OBJECT_LABELS),\n \"truncated\": tfds.features.Tensor(shape=(), dtype=tf.float32),\n \"occluded\": tfds.features.ClassLabel(num_classes=4),\n \"alpha\": tfds.features.Tensor(shape=(), dtype=tf.float32),\n \"bbox\": tfds.features.BBoxFeature(),\n \"dimensions\": tfds.features.Tensor(shape=(3,), dtype=tf.float32),\n \"location\": tfds.features.Tensor(shape=(3,), dtype=tf.float32),\n \"rotation_y\": tfds.features.Tensor(shape=(), dtype=tf.float32),\n }\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(),\n \"image/file_name\": tfds.features.Text(), # E.g. 
\"000001.png\".\n \"objects\": tfds.features.Sequence(annotations),\n }),\n homepage=_HOMEPAGE_URL,\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n filenames = {\n \"images\": _DATA_URL + \"/\" + _IMAGES_FNAME,\n \"annotations\": _DATA_URL + \"/\" + _LABELS_FNAME,\n \"devkit\": _DATA_URL + \"/\" + _DEVKIT_FNAME,\n }\n files = dl_manager.download(filenames)\n train_images, validation_images, test_images = _build_splits(\n dl_manager.iter_archive(files[\"devkit\"]))\n\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n \"images\": dl_manager.iter_archive(files[\"images\"]),\n \"annotations\": dl_manager.iter_archive(files[\"annotations\"]),\n \"subdir\": \"training\",\n \"image_ids\": train_images,\n }),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n \"images\": dl_manager.iter_archive(files[\"images\"]),\n \"annotations\": dl_manager.iter_archive(files[\"annotations\"]),\n \"subdir\": \"training\",\n \"image_ids\": validation_images,\n }),\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs={\n \"images\": dl_manager.iter_archive(files[\"images\"]),\n \"annotations\": dl_manager.iter_archive(files[\"annotations\"]),\n \"subdir\": \"training\",\n \"image_ids\": test_images,\n }),\n ]\n\n def _generate_examples(self, images, annotations, subdir, image_ids):\n \"\"\"Yields images and annotations.\n\n Args:\n images: object that iterates over the archive of images.\n annotations: object that iterates over the archive of annotations.\n subdir: subdirectory from which to extract images and annotations, e.g.\n training or testing.\n image_ids: file ids for images in this split.\n\n Yields:\n A tuple containing the example's key, and the example.\n \"\"\"\n cv2 = tfds.core.lazy_imports.cv2\n\n all_annotations = dict()\n for fpath, fobj in annotations:\n prefix, ext = os.path.splitext(fpath)\n if ext != \".txt\":\n continue\n if prefix.split(os.path.sep)[0] != subdir:\n continue\n\n # Key is the datapoint id. E.g. training/label_2/label_000016 -> 16.\n all_annotations[int(prefix[-6:])] = _parse_kitti_annotations(fobj)\n\n for fpath, fobj in images:\n prefix, ext = os.path.splitext(fpath)\n if ext != \".png\":\n continue\n if prefix.split(os.path.sep)[0] != subdir:\n continue\n image_id = int(prefix[-6:])\n if image_id not in image_ids:\n continue\n annotations = all_annotations[image_id]\n img = cv2.imdecode(\n np.frombuffer(fobj.read(), dtype=np.uint8), cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n height, width, _ = img.shape\n for obj in annotations:\n obj[\"bbox\"] = _build_bounding_box(obj[\"bbox_raw\"], height, width)\n del obj[\"bbox_raw\"]\n _, fname = os.path.split(fpath)\n record = {\"image\": img, \"image/file_name\": fname, \"objects\": annotations}\n yield fname, record\n\n\ndef _build_bounding_box(bbox, height, width):\n \"\"\"Builds and returns TFDS bounding box.\n\n Args:\n bbox: RawBoundingBox, bounding box in Kitti coordinates (origin top left).\n height: Image height in pixels.\n width: Image width in pixels.\n\n Returns:\n A TFDS BBox (origin bottom left).\n \"\"\"\n return tfds.features.BBox(\n ymin=(height - bbox.bottom) / height,\n ymax=(height - bbox.top) / height,\n xmin=bbox.left / width,\n xmax=bbox.right / width,\n )\n\n\ndef _parse_kitti_annotations(annotations_csv):\n \"\"\"Loads and parses the Kitti object annotations.\n\n Args:\n annotations_csv: csv file containing annotations for a single image.\n\n Returns:\n A list of labelled bounding boxes. 
Each bounding box is stored as a\n dictionary of features.\n \"\"\"\n annotations = []\n for line in annotations_csv:\n (obj_type, truncated, occluded, alpha, left, top, right, bottom, height,\n width, length, x, y, z,\n rotation_y) = list(csv.reader([line.decode()], delimiter=\" \"))[0]\n # DontCare objects lack annotations, so skip them.\n if obj_type == \"DontCare\":\n continue\n annotations.append({\n \"type\":\n obj_type,\n \"truncated\":\n float(truncated),\n \"occluded\":\n int(occluded),\n \"alpha\":\n float(alpha),\n \"bbox_raw\":\n RawBoundingBox(\n top=float(top),\n bottom=float(bottom),\n left=float(left),\n right=float(right)),\n \"dimensions\": [float(v) for v in [height, width, length]],\n \"location\": [float(v) for v in [x, y, z]],\n \"rotation_y\":\n float(rotation_y),\n })\n return annotations\n\n\ndef _build_splits(devkit):\n \"\"\"Splits the train data into train/val/test by video.\n\n Ensures that images from the same video do not traverse the splits.\n\n Args:\n devkit: object that iterates over the devkit archive.\n\n Returns:\n train_images: File ids for the training set images.\n validation_images: File ids for the validation set images.\n test_images: File ids for the test set images.\n \"\"\"\n mapping_line_ids = None\n mapping_lines = None\n for fpath, fobj in devkit:\n if fpath == os.path.join(\"mapping\", \"train_rand.txt\"):\n # Converts 1-based line index to 0-based line index.\n mapping_line_ids = [\n int(x.strip()) - 1 for x in fobj.read().decode(\"utf-8\").split(\",\")\n ]\n elif fpath == os.path.join(\"mapping\", \"train_mapping.txt\"):\n mapping_lines = fobj.read().splitlines()\n mapping_lines = [x.decode(\"utf-8\") for x in mapping_lines]\n\n assert mapping_line_ids\n assert mapping_lines\n\n video_to_image = collections.defaultdict(list)\n for image_id, mapping_lineid in enumerate(mapping_line_ids):\n line = mapping_lines[mapping_lineid]\n video_id = line.split(\" \")[1]\n video_to_image[video_id].append(image_id)\n\n # Sets numpy random state.\n numpy_original_state = np.random.get_state()\n np.random.seed(seed=123)\n\n # Max 1 for testing.\n num_test_videos = max(1,\n _TEST_SPLIT_PERCENT_VIDEOS * len(video_to_image) // 100)\n num_validation_videos = max(\n 1,\n _VALIDATION_SPLIT_PERCENT_VIDEOS * len(video_to_image) // 100)\n test_videos = set(\n np.random.choice(\n sorted(list(video_to_image.keys())), num_test_videos, replace=False))\n validation_videos = set(\n np.random.choice(\n sorted(list(set(video_to_image.keys()) - set(test_videos))),\n num_validation_videos,\n replace=False))\n test_images = []\n validation_images = []\n train_images = []\n for k, v in video_to_image.items():\n if k in test_videos:\n test_images.extend(v)\n elif k in validation_videos:\n validation_images.extend(v)\n else:\n train_images.extend(v)\n\n # Resets numpy random state.\n np.random.set_state(numpy_original_state)\n return train_images, validation_images, test_images\n"
] | [
[
"tensorflow.io.gfile.GFile"
],
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile"
],
[
"numpy.random.set_state",
"numpy.random.seed",
"numpy.random.get_state"
]
] |
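The `_build_splits` helper in the Kitti row above snapshots the global numpy RNG state, seeds it, draws the video-level split, and then restores the state so callers' randomness is untouched. A minimal runnable sketch of that save-seed-restore pattern, using made-up video ids and percentages:

```python
import numpy as np

def deterministic_split(video_ids, test_percent=10, seed=123):
    """Pick a reproducible test subset without disturbing the global RNG."""
    original_state = np.random.get_state()  # snapshot the caller's RNG state
    np.random.seed(seed)                    # fixed seed -> identical split every run
    num_test = max(1, test_percent * len(video_ids) // 100)
    test_videos = set(
        np.random.choice(sorted(video_ids), num_test, replace=False))
    np.random.set_state(original_state)     # hand the original state back
    return test_videos

print(deterministic_split(["v00", "v01", "v02", "v03"]))  # stable across runs
```

Sorting before `np.random.choice` matters: set iteration order is not stable across processes, so sampling from a sorted list is what makes the seeded draw reproducible.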
AK391/mt3 | [
"d43c95ccbf9caa08d18e985ca2f2fc7e286a2f66"
] | [
"mt3/datasets.py"
] | [
"# Copyright 2021 The MT3 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dataset configurations.\"\"\"\n\nimport dataclasses\nfrom typing import Mapping, Sequence, Union\n\nfrom mt3 import note_sequences\nimport tensorflow as tf\n\n\[email protected]\nclass InferEvalSplit:\n # key in dictionary containing all dataset splits\n name: str\n # task name suffix (each eval split is a separate task)\n suffix: str\n # whether or not to include in the mixture of all eval tasks\n include_in_mixture: bool = True\n\n\[email protected]\nclass DatasetConfig:\n \"\"\"Configuration for a transcription dataset.\"\"\"\n # dataset name\n name: str\n # mapping from split name to path\n paths: Mapping[str, str]\n # mapping from feature name to feature\n features: Mapping[str, Union[tf.io.FixedLenFeature,\n tf.io.FixedLenSequenceFeature]]\n # training split name\n train_split: str\n # training eval split name\n train_eval_split: str\n # list of infer eval split specs\n infer_eval_splits: Sequence[InferEvalSplit]\n # list of track specs to be used for metrics\n track_specs: Sequence[note_sequences.TrackSpec] = dataclasses.field(\n default_factory=list)\n\nMAESTROV1_CONFIG = DatasetConfig(\n name='maestrov1',\n paths={\n 'train':\n 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-?????-of-00010',\n 'train_subset':\n 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_train.tfrecord-00002-of-00010',\n 'validation':\n 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-?????-of-00010',\n 'validation_subset':\n 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_validation.tfrecord-0000[06]-of-00010',\n 'test':\n 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_ns_wav_test.tfrecord-?????-of-00010'\n },\n features={\n 'audio': tf.io.FixedLenFeature([], dtype=tf.string),\n 'sequence': tf.io.FixedLenFeature([], dtype=tf.string),\n 'id': tf.io.FixedLenFeature([], dtype=tf.string)\n },\n train_split='train',\n train_eval_split='validation_subset',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train_full',\n include_in_mixture=False),\n InferEvalSplit(name='train_subset', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation_full',\n include_in_mixture=False),\n InferEvalSplit(name='validation_subset', suffix='validation'),\n InferEvalSplit(name='test', suffix='test', include_in_mixture=False)\n ])\n\n\nMAESTROV3_CONFIG = DatasetConfig(\n name='maestrov3',\n paths={\n 'train':\n 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-?????-of-00025',\n 'train_subset':\n 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_train.tfrecord-00004-of-00025',\n 'validation':\n 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-?????-of-00025',\n 'validation_subset':\n 'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_validation.tfrecord-0002?-of-00025',\n 'test':\n 
'gs://magentadata/datasets/maestro/v3.0.0/maestro-v3.0.0_ns_wav_test.tfrecord-?????-of-00025'\n },\n features={\n 'audio': tf.io.FixedLenFeature([], dtype=tf.string),\n 'sequence': tf.io.FixedLenFeature([], dtype=tf.string),\n 'id': tf.io.FixedLenFeature([], dtype=tf.string)\n },\n train_split='train',\n train_eval_split='validation_subset',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train_full',\n include_in_mixture=False),\n InferEvalSplit(name='train_subset', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation_full',\n include_in_mixture=False),\n InferEvalSplit(name='validation_subset', suffix='validation'),\n InferEvalSplit(name='test', suffix='test', include_in_mixture=False)\n ])\n\n\nGUITARSET_CONFIG = DatasetConfig(\n name='guitarset',\n paths={\n 'train':\n 'gs://mt3/data/datasets/guitarset/train.tfrecord-?????-of-00019',\n 'validation':\n 'gs://mt3/data/datasets/guitarset/validation.tfrecord-?????-of-00006',\n },\n features={\n 'sequence': tf.io.FixedLenFeature([], dtype=tf.string),\n 'audio': tf.io.FixedLenFeature([], dtype=tf.string),\n 'velocity_range': tf.io.FixedLenFeature([], dtype=tf.string),\n 'id': tf.io.FixedLenFeature([], dtype=tf.string),\n },\n train_split='train',\n train_eval_split='validation',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation'),\n ])\n\n\nURMP_CONFIG = DatasetConfig(\n name='urmp',\n paths={\n 'train': 'gs://mt3/data/datasets/urmp/train.tfrecord',\n 'validation': 'gs://mt3/data/datasets/urmp/validation.tfrecord',\n },\n features={\n 'id': tf.io.FixedLenFeature([], dtype=tf.string),\n 'tracks': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.int64, allow_missing=True),\n 'inst_names': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.string, allow_missing=True),\n 'audio': tf.io.FixedLenFeature([], dtype=tf.string),\n 'sequence': tf.io.FixedLenFeature([], dtype=tf.string),\n 'instrument_sequences': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.string, allow_missing=True),\n },\n train_split='train',\n train_eval_split='validation',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation')\n ])\n\n\nMUSICNET_CONFIG = DatasetConfig(\n name='musicnet',\n paths={\n 'train':\n 'gs://mt3/data/datasets/musicnet/musicnet-train.tfrecord-?????-of-00036',\n 'validation':\n 'gs://mt3/data/datasets/musicnet/musicnet-validation.tfrecord-?????-of-00005',\n 'test':\n 'gs://mt3/data/datasets/musicnet/musicnet-test.tfrecord-?????-of-00003'\n },\n features={\n 'id': tf.io.FixedLenFeature([], dtype=tf.string),\n 'sample_rate': tf.io.FixedLenFeature([], dtype=tf.float32),\n 'audio': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.float32, allow_missing=True),\n 'sequence': tf.io.FixedLenFeature([], dtype=tf.string)\n },\n train_split='train',\n train_eval_split='validation',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation'),\n InferEvalSplit(name='test', suffix='test', include_in_mixture=False)\n ])\n\n\nCERBERUS4_CONFIG = DatasetConfig(\n name='cerberus4',\n paths={\n 'train':\n 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-?????-of-00286',\n 'train_subset':\n 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_train_bass:drums:guitar:piano.tfrecord-00000-of-00286',\n 'validation':\n 
'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-?????-of-00212',\n 'validation_subset':\n 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_validation_bass:drums:guitar:piano.tfrecord-0000?-of-00212',\n 'test':\n 'gs://mt3/data/datasets/cerberus4/slakh_multi_cerberus_test_bass:drums:guitar:piano.tfrecord-?????-of-00106'\n },\n features={\n 'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),\n 'inst_names': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.string, allow_missing=True),\n 'midi_class': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.int64, allow_missing=True),\n 'mix': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.float32, allow_missing=True),\n 'note_sequences': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.string, allow_missing=True),\n 'plugin_name': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.int64, allow_missing=True),\n 'program_num': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.int64, allow_missing=True),\n 'slakh_class': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.int64, allow_missing=True),\n 'src_ids': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.string, allow_missing=True),\n 'stems': tf.io.FixedLenSequenceFeature(\n [], dtype=tf.float32, allow_missing=True),\n 'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),\n 'target_type': tf.io.FixedLenFeature([], dtype=tf.string),\n 'track_id': tf.io.FixedLenFeature([], dtype=tf.string),\n },\n train_split='train',\n train_eval_split='validation_subset',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train_full',\n include_in_mixture=False),\n InferEvalSplit(name='train_subset', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation_full',\n include_in_mixture=False),\n InferEvalSplit(name='validation_subset', suffix='validation'),\n InferEvalSplit(name='test', suffix='test', include_in_mixture=False)\n ],\n track_specs=[\n note_sequences.TrackSpec('bass', program=32),\n note_sequences.TrackSpec('drums', is_drum=True),\n note_sequences.TrackSpec('guitar', program=24),\n note_sequences.TrackSpec('piano', program=0)\n ])\n\n\nSLAKH_CONFIG = DatasetConfig(\n name='slakh',\n paths={\n 'train':\n 'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-?????-of-02307',\n 'train_subset':\n 'gs://mt3/data/datasets/slakh/slakh_multi_full_subsets_10_train_all_inst.tfrecord-00000-of-02307',\n 'validation':\n 'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-?????-of-00168',\n 'validation_subset':\n 'gs://mt3/data/datasets/slakh/slakh_multi_full_validation_all_inst.tfrecord-0000?-of-00168',\n 'test':\n 'gs://mt3/data/datasets/slakh/slakh_multi_full_test_all_inst.tfrecord-?????-of-00109'\n },\n features={\n 'audio_sample_rate': tf.io.FixedLenFeature([], dtype=tf.int64),\n 'inst_names': tf.io.FixedLenSequenceFeature([], dtype=tf.string,\n allow_missing=True),\n 'midi_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,\n allow_missing=True),\n 'mix': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,\n allow_missing=True),\n 'note_sequences': tf.io.FixedLenSequenceFeature([], dtype=tf.string,\n allow_missing=True),\n 'plugin_name': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,\n allow_missing=True),\n 'program_num': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,\n allow_missing=True),\n 'slakh_class': tf.io.FixedLenSequenceFeature([], dtype=tf.int64,\n allow_missing=True),\n 'src_ids': tf.io.FixedLenSequenceFeature([], dtype=tf.string,\n allow_missing=True),\n 
'stems': tf.io.FixedLenSequenceFeature([], dtype=tf.float32,\n allow_missing=True),\n 'stems_shape': tf.io.FixedLenFeature([2], dtype=tf.int64),\n 'target_type': tf.io.FixedLenFeature([], dtype=tf.string),\n 'track_id': tf.io.FixedLenFeature([], dtype=tf.string),\n },\n train_split='train',\n train_eval_split='validation_subset',\n infer_eval_splits=[\n InferEvalSplit(name='train', suffix='eval_train_full',\n include_in_mixture=False),\n InferEvalSplit(name='train_subset', suffix='eval_train'),\n InferEvalSplit(name='validation', suffix='validation_full',\n include_in_mixture=False),\n InferEvalSplit(name='validation_subset', suffix='validation'),\n InferEvalSplit(name='test', suffix='test', include_in_mixture=False)\n ])\n"
] | [
[
"tensorflow.io.FixedLenFeature",
"tensorflow.io.FixedLenSequenceFeature"
]
] |
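The MT3 `DatasetConfig`s above only declare feature specs; a consumer would hand such a mapping to `tf.io.parse_single_example`. A toy round-trip under that assumption (the serialized record below is invented, not a real MAESTRO example):

```python
import tensorflow as tf

features = {
    'audio': tf.io.FixedLenFeature([], dtype=tf.string),
    'sequence': tf.io.FixedLenFeature([], dtype=tf.string),
    'id': tf.io.FixedLenFeature([], dtype=tf.string),
}

def bytes_feature(value):
    # Wrap raw bytes in the tf.train.Feature proto expected by Example.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

# Build a toy tf.train.Example matching the spec above.
example = tf.train.Example(features=tf.train.Features(feature={
    'audio': bytes_feature(b'raw-wav-bytes'),
    'sequence': bytes_feature(b'serialized-notesequence'),
    'id': bytes_feature(b'track-001'),
}))

parsed = tf.io.parse_single_example(example.SerializeToString(), features)
print(parsed['id'].numpy())  # b'track-001'
```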
Leonardodsch/house-rocket-insights | [
"dd8405b776e223ec5ff8392a027d4b0116fcd7ca"
] | [
"house_rocket_app.py"
] | [
"import pandas as pd\nimport numpy as np\nimport streamlit as st\nimport plotly.express as px\nimport ipywidgets as widgets\nfrom ipywidgets import fixed\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set_style('whitegrid')\n\nst.set_page_config(layout='wide')\n\[email protected](allow_output_mutation=True)\ndef get_data(path):\n data = pd.read_csv(path)\n\n return data\n\ndef barplot(a,b, aux):\n plot = sns.barplot(x=a, y=b, data=aux, edgecolor='k', palette='Blues')\n sns.despine()\n return plot\n\n# get data\npath = 'data/df_sugestions01.csv'\npath2 = 'data/df_sugestions02.csv'\npath3 = 'data/df_full.csv'\n\ndata = get_data(path)\ndf = get_data(path2)\ndf1 = get_data(path3)\n\nst.sidebar.write()\nf_zipcode = st.sidebar.multiselect('Select Zipcode', data['zipcode'].unique())\nf_condition = st.sidebar.multiselect('Select Condition', data['condition'].sort_values(ascending=True).unique())\nf_buy = st.sidebar.multiselect('Select buy option', data['buy'].unique())\nf_season = st.sidebar.multiselect('Select season', df['season'].unique())\n\nmin_price = int(df['price'].min())\nmax_price = int(df['price'].max())\nmedian_price = int(df['price'].median())\n\n\n\nst.title('House Rocket')\nst.write('A House Rocket é uma empresa focada na compra e venda de imóveis, buscando avaliar e encontrar bons negócios para constituir seu portfólio e oferecer também bons'\n ' negocios para seus clientes. Diante disso foi realizada uma análise onde diversos imóveis foram explorados e avaliados buscando o que poderia se tornar uma boa oportunidade para a empresa'\n ' e alguns insights interessantes foram descobertos, algo que se tornará de extremo valor caso seja bem utilizado.'\n 'Para detalhes mais técnicos e visualização do projeto completo acessar:' ' [GitHub](https://github.com/Leonardodsch/house-rocket-insights)')\n\nst.title('Business Questions')\nst.write('As tabelas são interativas e podem ser filtradas a partir das opções na barra lateral, permitindo assim que os imóveis'\n ' possam ser exibidos de acordo com a preferência.')\nst.header(' Quais são os imóveis que a House Rocket deveria comprar e por qual preço ?')\nst.write(' Na primeita tabela estão os imóveis agrupados por região (zipcode), com os preços médios de cada região. 
Estes são avaliados juntamente com o valor'\n ' da coluna condition de cada imóvel, para assim ser feita uma sugestão de compra ou não')\nst.header(' Uma vez a casa comprada, qual o melhor momento para vendê-las e por qual preço ?')\nst.write('Na segunda tabela é possivel filtrar os imóveis pela região mas também pela sazonalidade, o que permite ver as melhores opções de compra em cada estação do ano'\n ' e o valor da venda baseado nas premissas de assumidas no começo do projeto')\n\n\n\nif (f_zipcode != []) & (f_condition == []) & (f_buy == []) & (f_season == []):\n st.write(data.loc[data['zipcode'].isin(f_zipcode)])\n st.write(df.loc[(df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition != []) & (f_zipcode != []) & (f_buy != []) & (f_season != []):\n st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])\n st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition != []) & (f_zipcode == []) & (f_buy == []) & (f_season == []):\n st.write(data.loc[data['condition'].isin(f_condition)])\n st.dataframe(df)\n\nelif (f_buy != []) & (f_zipcode == []) & (f_condition == []) & (f_season == []):\n st.write(data.loc[data['buy'].isin(f_buy)])\n st.dataframe(df)\n\nelif (f_condition != []) & (f_zipcode != []) & (f_buy == []) & (f_season != []):\n st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode))])\n st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition == []) & (f_zipcode != []) & (f_buy != []) & (f_season == []):\n st.write(data.loc[(data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])\n st.write(df.loc[(df['zipcode'].isin(f_zipcode))])\n\nelif (f_season != []) & (f_zipcode == []) & (f_buy == []) & (f_condition == []):\n st.dataframe(data, height=400, width=700)\n st.write(df.loc[(df['season'].isin(f_season))])\n\nelif (f_season != []) & (f_zipcode == []) & (f_buy != []) & (f_condition == []):\n st.write(data.loc[data['buy'].isin(f_buy)])\n st.write(df.loc[df['season'].isin(f_season)])\n\nelif (f_season != []) & (f_zipcode == []) & (f_buy == []) & (f_condition != []):\n st.write(data.loc[data['condition'].isin(f_condition)])\n st.write(df.loc[df['season'].isin(f_season)])\n\nelif (f_season != []) & (f_zipcode == []) & (f_buy != []) & (f_condition != []):\n st.write(data.loc[data['condition'].isin(f_condition) & (data['buy'].isin(f_buy))])\n st.write(df.loc[df['season'].isin(f_season)])\n\nelif (f_zipcode != []) & (f_condition == []) & (f_buy == []) & (f_season != []):\n st.write(data.loc[data['zipcode'].isin(f_zipcode)])\n st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition == []) & (f_zipcode != []) & (f_buy != []) & (f_season != []):\n st.write(data.loc[(data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])\n st.write(df.loc[(df['season'].isin(f_season)) & (df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition != []) & (f_zipcode != []) & (f_buy == []) & (f_season == []):\n st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode))])\n st.write(df.loc[(df['zipcode'].isin(f_zipcode))])\n\nelif (f_condition != []) & (f_zipcode != []) & (f_buy != []) & (f_season == []):\n st.write(data.loc[(data['condition'].isin(f_condition)) & (data['zipcode'].isin(f_zipcode)) & (data['buy'].isin(f_buy))])\n st.write(df.loc[(df['zipcode'].isin(f_zipcode))])\n\nelse:\n data = data.copy()\n df = df.copy()\n 
st.dataframe(data, height=400, width=700)\n st.dataframe(df)\n\nst.header('Mapa com as indicações de compra')\nis_check = st.checkbox('Show Map')\n\n\nif is_check:\n\n selected_price_range = st.slider('Select the price range', min_price, max_price, median_price)\n buy_select = st.multiselect('Buy option', df1['buy'].unique())\n\n if (buy_select != []):\n # select rows\n houses = df1[(df1['price'] < selected_price_range) & (df1['buy'].isin(buy_select))][['id','zipcode','price','median_price','condition', 'lat', 'long']]\n # draw map\n fig = px.scatter_mapbox(\n houses,\n lat='lat',\n lon='long',\n color=\"condition\",\n size=\"price\",\n color_continuous_scale=px.colors.cyclical.IceFire,\n size_max=15,\n zoom=10 )\n\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(height=600, margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n st.plotly_chart(fig)\n\n else:\n # select rows\n houses = df1[['id','zipcode','price','median_price','condition', 'lat', 'long']].copy()\n # draw map\n fig = px.scatter_mapbox(\n houses,\n lat='lat',\n lon='long',\n color=\"condition\",\n size=\"price\",\n color_continuous_scale=px.colors.cyclical.IceFire,\n size_max=15,\n zoom=10 )\n\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(height=600, margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n st.plotly_chart(fig)\n\nst.title('Business Hypothesis')\n\n# H1\nst.header('H1: Imóveis que possuem vista para água, são 30% mais caros, na média')\nst.text('Falsa! Imóveis com vista para a agua são 200% mais caros na mádia')\naux = df1[['price','waterfront']].groupby('waterfront').mean().reset_index()\nfig = plt.figure(figsize=(9,3))\nbarplot('waterfront','price',aux)\nst.pyplot(fig)\n\n#H2\nst.header('H2: Imóveis com data de construção menor que 1955, são 50% mais baratos, na média')\nst.text('Falsa! Imóveis com data de construção menot do que 1955 são aproximadamente 1,6% mais baratos')\naux2 = df1[['price','yr_built']].copy()\naux2['yr_built'] = aux2['yr_built'].apply(lambda x: '<= 1955' if x <= 1955 else '> 1955')\naux = aux2[['price','yr_built']].groupby('yr_built').mean().reset_index()\nfig2 = plt.figure(figsize=(9,3))\nbarplot('yr_built','price',aux)\nst.pyplot(fig2)\n\n# Evolution over the year\nst.header('Evolution over the years')\naux = df1[['price','yr_built']].loc[df1['yr_built'] <= 1955].groupby('yr_built').mean().reset_index()\naux2 = df1[['price','yr_built']].loc[df1['yr_built'] > 1955].groupby('yr_built').mean().reset_index()\n\nfig_ = plt.figure(figsize=(15,7))\nplt.subplot(2,1,1)\nbarplot('yr_built','price', aux)\nplt.xticks(rotation=60);\nplt.title('Yr_built <= 1955')\n\nplt.subplot(2,1,2)\nbarplot('yr_built','price',aux2)\nplt.xticks(rotation=60);\nplt.title('Yr_built > 1955')\nplt.tight_layout()\nst.pyplot(fig_)\n\n#H3\nst.header('H3: Imóveis sem porão possuem area total (sqrt_lot), são 50% maiores do que com porão')\nst.text('Falsa! 
Imóveis sem porão possuem uma area total 23% maior')\naux = df1[['sqft_basement','sqft_lot']].copy()\naux['sqft_basement'] = aux['sqft_basement'].apply(lambda x: 'yes' if x != 0 else 'no')\naux1 = aux[['sqft_basement','sqft_lot']].groupby('sqft_basement').mean().reset_index()\naux1.sort_values(by='sqft_lot', ascending=True, inplace=True)\nfig3 = plt.figure(figsize=(9,3))\nbarplot('sqft_basement','sqft_lot',aux1)\nst.pyplot(fig3)\n\n#4\nst.header('H4: O crescimento do preço dos imóveis YoY ( Year over Year ) é de 10%')\nst.text('Falsa O crescimento do preço dos imoveis YoY é de 2%')\naux = df1[['price','year']].loc[df1['month'] == 5].copy()\naux1 = aux[['price','year']].groupby('year').mean().reset_index()\nfig4 = plt.figure(figsize=(9,3))\nbarplot('year','price',aux1)\nst.pyplot(fig4)\n\n#5\nst.header('H5: Imóveis com 3 banheiros tem um crescimento MoM ( Month over Month ) de 15%')\nst.text('Falsa! Imóveis com 3 banheiros não possuem um crescimento MoM de 15%')\naux = df1[['price','month']].loc[df1['bathrooms'] == 3].groupby(['month']).mean().reset_index()\naux['growth'] = aux['price'].pct_change()\nfig5 = plt.figure(figsize=(9,3))\nplt.subplot(2,1,1)\nplt.plot('month','price', data=aux)\nplt.ylabel('Price')\nplt.subplot(2,1,2)\nbarplot('month','growth',aux)\nst.pyplot(fig5)\n\n#6\nst.header('H6: Imóveis com 3 ou mais banheiros são 30% mais caros, na média')\nst.text('Falsa! Impoveis com 3 ou mais banheiros são 100% mais caros na média')\naux = df1[['bathrooms','price']].copy()\naux['bathrooms'] = aux['bathrooms'].apply(lambda x: '>= 3' if x >=3 else '< 3')\naux1 = aux[['price','bathrooms']].groupby('bathrooms').mean().reset_index()\nfig6 = plt.figure(figsize=(9,3))\nbarplot('bathrooms','price',aux1)\nst.pyplot(fig6)\n\n#7\nst.header('H7: Imóveis com condition igual ou maior do que 4 são 40% mais caros, na média')\nst.text('Falsa! Imóveis com condition igual ou maior do que 4 são 0,5% mais caros, na média')\naux = df1[['price','condition']].copy()\naux['condition'] = aux['condition'].apply(lambda x: '< 4' if x < 4 else '>= 4')\naux1 = aux[['price','condition']].groupby('condition').mean().reset_index()\nfig7 = plt.figure(figsize=(9,3))\nbarplot('condition','price',aux1)\nst.pyplot(fig7)\n\n#8\nst.header('H8: Imóveis vendidos no inverno são 30% mais baratos na média do que imóveis vendidos no verão')\nst.text('Falsa! Imóveis vendidos no inverno são 4% mais baratos na média do que imóveis vendidos no verão')\naux = df1[['price','season']].loc[(df1['season'] == 'winter') | (df1['season'] == 'summer') ].copy()\naux1 = aux[['price','season']].groupby('season').mean().reset_index()\naux1.sort_values(by='price', ascending=True, inplace=True)\nfig8 = plt.figure(figsize=(9,3))\nbarplot('season','price',aux1)\nst.pyplot(fig8)\n\n#9\nst.header('H9: Imóveis com mais de 400m2 (m2_living) são 50% mais caros na media')\nst.text('Falsa! Imóveis com mais de 400m2 são 230% mais caros na média')\naux = df1[['price','m2_living']].copy()\naux['m2_living'] = aux['m2_living'].apply(lambda x: '< 400' if x < 400 else '> 400')\naux1= aux[['price','m2_living']].groupby('m2_living').mean().reset_index()\nfig9 = plt.figure(figsize=(9,3))\nbarplot('m2_living','price',aux1)\nst.pyplot(fig9)\n\n#10\nst.header('H10: Imóveis com menos de 100m2 tem um crescimento Mom ( Month over Month ) de 20%')\nst.text('Falsa! 
Imóveis com menos de 100m2 não possuem um crescimento MoM de 20%')\naux = df1[['price','month']].loc[df1['m2_living'] < 100 ].groupby('month').mean().reset_index()\naux['growth'] = aux['price'].pct_change()\nfig10 = plt.figure(figsize=(9,3))\nplt.subplot(2,1,1)\nplt.plot('month','price', data=aux)\nplt.ylabel('Price')\nplt.subplot(2,1,2)\nbarplot('month','growth',aux)\nst.pyplot(fig10)\n\n#11\nst.header('H11: Imóveis com 4 ou mais quartos são 50% mais caros, na média')\nst.text('Verdadeira! Imóveis com 4 ou mais quartos são 50% mais caros, na média')\naux = df1[['bedrooms','price']].copy()\naux['bedrooms'] = aux['bedrooms'].apply(lambda x: '< 4' if x < 4 else '>= 4')\naux1= aux[['price','bedrooms']].groupby('bedrooms').mean().reset_index()\nfig11 = plt.figure(figsize=(9,3))\nbarplot('bedrooms','price',aux1)\nst.pyplot(fig11)\n\n"
] | [
[
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot"
]
] |
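Several hypotheses in the Streamlit app above (H5 and H10) rest on the same two-step pandas idiom: group by month, take the mean price, then derive month-over-month growth with `pct_change`. A self-contained sketch on toy numbers, not the app's King County data:

```python
import pandas as pd

df = pd.DataFrame({
    'month': [1, 1, 2, 2, 3, 3],
    'price': [100.0, 110.0, 120.0, 130.0, 115.0, 125.0],
})

monthly = df.groupby('month', as_index=False)['price'].mean()
monthly['growth'] = monthly['price'].pct_change()  # NaN for the first month
print(monthly)
#    month  price    growth
# 0      1  105.0       NaN
# 1      2  125.0  0.190476
# 2      3  120.0 -0.040000
```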
OSSDC/OSSDC-VisionBasedACC | [
"a9004c888e91b8becaebc22524f698ebb3c9746e"
] | [
"object_detection/test_mobilenet.py"
] | [
"import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport cv2\n\nfrom webcamvideostream import *\n\nvideoUrl = 1\nvideoUrl = \"/sharefolder/sdc/sdc-data/ossdc-simulator-TheCrew-PS4-30fps.mp4\"\n\nwebcam = False\n#webcam = True\n\nsct = None\n\nret = True\n\nif webcam:\n cap = WebcamVideoStream(videoUrl,(1280,720),30)\n cap.start()\nelse:\n cap = cv2.VideoCapture(videoUrl)\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n\n# ## Object detection imports\n# Here are the imports from the object detection module.\n\n# In[3]:\n\nfrom utils import label_map_util\n\nfrom utils import visualization_utils as vis_util\n\n\n# # Model preparation \n\n# ## Variables\n# \n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file. \n# \n# By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\n\n# In[4]:\n\n# What model to download.\nMODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n\nNUM_CLASSES = 90\n\n\n# ## Download Model\n\n# In[5]:\n'''\nopener = urllib.request.URLopener()\nopener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\ntar_file = tarfile.open(MODEL_FILE)\nfor file in tar_file.getmembers():\n file_name = os.path.basename(file.name)\n if 'frozen_inference_graph.pb' in file_name:\n tar_file.extract(file, os.getcwd())\n'''\n\n# ## Load a (frozen) Tensorflow model into memory.\n\n# In[6]:\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n# ## Loading label map\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\n\n# In[7]:\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\n# ## Helper code\n\n# In[8]:\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\n# # Detection\n\n# In[9]:\n\n# For the sake of simplicity we will use only 2 images:\n# image1.jpg\n# image2.jpg\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\nPATH_TO_TEST_IMAGES_DIR = 'test_images'\nTEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]\n\n# Size, in inches, of the output images.\nIMAGE_SIZE = (12, 8)\n\n# In[10]:\n\nfrom datetime import datetime\n\nprecision = 10\ndef getCurrentClock():\n #return time.clock()\n return datetime.now()\n\nframeCnt=0\nprevFrameCnt=0\nprevTime=getCurrentClock()\n\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n while True:\n if sct is not None or webcam or cap.grab():\n if sct is not None:\n frame = numpy.asarray(sct.grab(mon))\n else:\n if webcam:\n frame = cap.read()\n else:\n flag, frame = cap.retrieve() \n if not flag:\n continue \n image_np = frame\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n \n frameCnt=frameCnt+1\n nowMicro = getCurrentClock()\n delta = (nowMicro-prevTime).total_seconds()\n\n if delta>=1.0:\n fpsValue = ((frameCnt-prevFrameCnt)/delta) \n print(\"FPS = %3.2f, Frame = %6d\" % (fpsValue, frameCnt))\n prevFrameCnt=frameCnt\n\n cv2.imshow('object detection', cv2.resize(image_np, (800,600)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break"
] | [
[
"numpy.squeeze",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.Session",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
]
] |
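The detection loop above reports throughput by counting frames and printing once at least a second has elapsed. A stripped-down sketch of that FPS counter with the camera replaced by a sleep; note it also resets the reference time after each report, which the loop above omits, so the printed figure here is a per-interval rate rather than a cumulative average:

```python
import time
from datetime import datetime

frame_count = 0
prev_frame_count = 0
prev_time = datetime.now()

for _ in range(60):      # stand-in for the video-capture loop
    time.sleep(0.02)     # pretend to grab and process one frame
    frame_count += 1
    now = datetime.now()
    delta = (now - prev_time).total_seconds()
    if delta >= 1.0:
        fps = (frame_count - prev_frame_count) / delta
        print("FPS = %3.2f, Frame = %6d" % (fps, frame_count))
        prev_frame_count = frame_count
        prev_time = now  # reset the interval (the original keeps the start time)
```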
UBC-MDS/eazieda | [
"214f3907e71ddbaa1b64c7a201cb0f07661263ac"
] | [
"tests/test_missing_impute.py"
] | [
"from eazieda.missing_impute import missing_impute\nimport pandas as pd\nimport numpy as np\nfrom pytest import raises, fixture\n\n\n@fixture\ndef df_miss():\n df = pd.DataFrame(\n [[1.0, \"x\"], [np.nan, \"y\"], [2.0, np.nan], [3.0, \"y\"]],\n columns=[\"a\", \"b\"],\n )\n return df\n\n\n@fixture\ndef df_miss_2():\n df = pd.DataFrame(\n [[1.0, \"x\"], [np.nan, \"y\"], [2.0, np.nan], [3.0, \"y\"], [4.0, \"y\"]],\n columns=[\"a\", \"b\"],\n )\n return df\n\n\ndef test_missing_impute(df_miss, df_miss_2):\n\n # Test with default arguments\n expected_output_default = pd.DataFrame(\n data={\"a\": [1.0, 2.0, 2.0, 3.0], \"b\": [\"x\", \"y\", \"y\", \"y\"]}\n ).reset_index(drop=True)\n\n missing_output_default = missing_impute(df_miss)\n\n assert pd.DataFrame.equals(missing_output_default, expected_output_default)\n\n # Test with two drop arguments selected at the same time\n expected_output_two_drop = pd.DataFrame(\n data={\"a\": [1.0, 3.0], \"b\": [\"x\", \"y\"]}\n ).reset_index(drop=True)\n\n missing_output_two_drop = missing_impute(\n df_miss, method_num=\"drop\", method_non_num=\"drop\"\n )\n\n assert pd.DataFrame.equals(\n missing_output_two_drop, expected_output_two_drop\n )\n\n # Test with method_num=\"mean\", method_non_num=\"drop\"\n expected_output_one_drop = pd.DataFrame(\n data={\"a\": [1.0, 2.0, 3.0], \"b\": [\"x\", \"y\", \"y\"]}\n ).reset_index(drop=True)\n\n missing_output_one_drop = missing_impute(df_miss, method_non_num=\"drop\")\n\n assert pd.DataFrame.equals(\n expected_output_one_drop, missing_output_one_drop\n )\n\n # Test with method_num=\"median\", method_non_num=\"most_frequent\"\n expected_output_median = pd.DataFrame(\n data={\"a\": [1.0, 2.0, 2.0, 3.0], \"b\": [\"x\", \"y\", \"y\", \"y\"]}\n ).reset_index(drop=True)\n missing_output_median = missing_impute(df_miss, method_num=\"median\")\n\n assert pd.DataFrame.equals(missing_output_median, expected_output_median)\n\n # Test with method_num=\"median\", method_non_num=\"drop\"\n expected_output_median_drop = pd.DataFrame(\n data={\"a\": [1.0, 2.0, 3.0], \"b\": [\"x\", \"y\", \"y\"]}\n ).reset_index(drop=True)\n missing_output_median_drop = missing_impute(\n df_miss, method_num=\"median\", method_non_num=\"drop\"\n )\n\n assert pd.DataFrame.equals(\n missing_output_median_drop, expected_output_median_drop\n )\n\n # Test with method_num=\"drop\", method_non_num=\"most_frequent\"\n expected_output_drop_freq = pd.DataFrame(\n [[1.0, \"x\"], [2.0, \"y\"], [3.0, \"y\"], [4.0, \"y\"]], columns=[\"a\", \"b\"],\n ).reset_index(drop=True)\n missing_output_drop_freq = missing_impute(\n df_miss_2, method_num=\"drop\", method_non_num=\"most_frequent\"\n )\n\n assert pd.DataFrame.equals(\n missing_output_drop_freq, expected_output_drop_freq\n )\n\n # Test whether a not dataframe input raises TypeError\n with raises(TypeError):\n missing_impute(5)\n\n # Test whether invaild input of method_num raises ValueError\n with raises(ValueError):\n missing_impute(df_miss, method_num=\"mea\")\n\n # Test whether invaild input of method_non_num raises ValueError\n with raises(ValueError):\n missing_impute(df_miss, method_num=\"mean\", method_non_num=\"most_freq\")\n"
] | [
[
"pandas.DataFrame.equals",
"pandas.DataFrame"
]
] |
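The eazieda test module above leans on two pytest features: `@fixture` to share small DataFrames between tests and `raises` as a context manager to assert error types. A toy, self-contained version of that pattern; `validate` is an invented stand-in for `missing_impute`'s argument checks, not part of the package:

```python
import pandas as pd
from pytest import fixture, raises

def validate(data, method_num="mean"):
    """Invented function with missing_impute-style argument checking."""
    if not isinstance(data, pd.DataFrame):
        raise TypeError("data must be a pandas DataFrame")
    if method_num not in ("mean", "median", "drop"):
        raise ValueError("invalid method_num")
    return data

@fixture
def df_small():
    return pd.DataFrame({"a": [1.0, 2.0]})

def test_validate(df_small):
    assert validate(df_small) is df_small
    with raises(TypeError):
        validate(5)                              # not a DataFrame
    with raises(ValueError):
        validate(df_small, method_num="mea")     # misspelled method
```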
hui2000ji/scETM | [
"0a34c345d70b262ebc38e033bae683fa4929ed3e"
] | [
"src/scETM/models/BatchClassifier.py"
] | [
"from typing import Sequence, Mapping\nfrom numpy import mod\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nfrom .model_utils import get_fully_connected_layers\nfrom scETM.logging_utils import log_arguments\n\nclass BatchClassifier(nn.Module):\n \"\"\"Docstring (TODO)\n \"\"\"\n\n @log_arguments\n def __init__(self,\n n_input: int,\n n_output: int,\n hidden_sizes: Sequence[int],\n bn: bool = False,\n bn_track_running_stats: bool = False,\n dropout_prob = 0.2,\n adversarial_loss = 'confuse',\n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n ) -> None:\n \"\"\"Docstring (TODO)\n \"\"\"\n\n super().__init__()\n\n self.batch_clf = get_fully_connected_layers(\n n_trainable_input=n_input,\n n_trainable_output=n_output,\n hidden_sizes=hidden_sizes,\n bn=bn,\n bn_track_running_stats=bn_track_running_stats,\n dropout_prob=dropout_prob,\n ).to(device)\n self.n_output = n_output\n assert adversarial_loss in ('confuse', 'reverse')\n self.adversarial_loss = adversarial_loss\n\n def forward(self, X: torch.Tensor, y: torch.Tensor) -> Mapping[str, torch.Tensor]:\n \"\"\"Docstring (TODO)\n \"\"\"\n\n logit = self.batch_clf(X)\n if not self.training:\n return dict(logit=logit)\n\n clf_loss = F.cross_entropy(logit, y)\n if self.adversarial_loss == 'confuse':\n model_loss = (-F.log_softmax(logit, dim=-1) * torch.zeros_like(logit).fill_(1/self.n_output)).sum(-1).mean()\n else:\n model_loss = -clf_loss\n return clf_loss, dict(logit=logit, model_loss=model_loss), dict(clf_loss=clf_loss.detach().item())\n\n def train_step(self,\n optimizer: optim.Optimizer,\n X: torch.Tensor,\n y: torch.Tensor\n ) -> Mapping[str, torch.Tensor]:\n \"\"\"Docstring (TODO)\n \"\"\"\n\n self.train()\n optimizer.zero_grad()\n loss, fwd_dict, new_records = self(X, y)\n loss.backward()\n optimizer.step()\n new_records['clf_acc'] = (fwd_dict['logit'].argmax(1) == y).to(torch.float).mean().detach().item()\n return new_records"
] | [
[
"torch.nn.functional.log_softmax",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.nn.functional.cross_entropy"
]
] |
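The 'confuse' branch of `BatchClassifier.forward` above scores the model by the cross entropy between its batch predictions and a uniform target, pushing the representation to carry no batch signal. A standalone sketch of that loss on random logits, together with the equivalent KL-divergence form (they differ only by the uniform distribution's entropy, log K):

```python
import torch
import torch.nn.functional as F

logits = torch.randn(8, 4)   # toy: 8 cells, 4 experimental batches
k = logits.shape[1]
uniform = torch.full_like(logits, 1.0 / k)

# Cross entropy against the uniform target, as in the 'confuse' loss above.
model_loss = (-F.log_softmax(logits, dim=-1) * uniform).sum(-1).mean()

# Same quantity via KL divergence plus the constant log(k).
kl = F.kl_div(F.log_softmax(logits, dim=-1), uniform, reduction='batchmean')
print(model_loss.item(), (kl + torch.log(torch.tensor(float(k)))).item())  # equal
```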
traindb-project/traindb-model | [
"9ffdb8c0195051630692dbe6dfd8b9fe816a619f"
] | [
"models/TVAE.py"
] | [
"\"\"\"\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging\nimport rdt\nimport sdv\nfrom TrainDBBaseModel import TrainDBSynopsisModel\nimport pandas as pd\n\nimport torch\n\nLOGGER = logging.getLogger(__name__)\n\nclass TVAE(TrainDBSynopsisModel):\n\n def train(self, real_data, table_metadata):\n self.columns, _ = self.get_columns(real_data, table_metadata)\n\n LOGGER.info(\"Training %s\", self.__class__.__name__)\n model_kwargs = {}\n self.model = sdv.tabular.TVAE(table_metadata=table_metadata, **model_kwargs)\n self.model.fit(real_data)\n\n def save(self, output_path):\n self.model.save(output_path + '/model.pkl')\n torch.save({\n 'columns': self.columns\n }, output_path + '/model_info.pth')\n\n def load(self, input_path):\n self.model = sdv.tabular.TVAE.load(input_path + '/model.pkl')\n saved_model_info = torch.load(input_path + '/model_info.pth')\n self.columns = saved_model_info['columns']\n\n def synopsis(self, row_count):\n LOGGER.info(\"Synopsis Generating %s\", self.__class__.__name__)\n synthetic_data = self.model.sample(row_count)\n synthetic_data = pd.DataFrame(synthetic_data, columns=self.columns)\n\n return synthetic_data\n"
] | [
[
"pandas.DataFrame",
"torch.save",
"torch.load"
]
] |
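`TVAE.save`/`load` above write the fitted SDV model to one file and checkpoint side-car metadata (the column list) with `torch.save`/`torch.load`. A minimal sketch of just the side-car half, with an invented column list and a temp directory standing in for `output_path`:

```python
import os
import tempfile

import torch

columns = ["price", "bedrooms", "bathrooms"]      # invented column list

output_path = tempfile.mkdtemp()                  # stand-in for a real output dir
torch.save({"columns": columns}, os.path.join(output_path, "model_info.pth"))

info = torch.load(os.path.join(output_path, "model_info.pth"))
assert info["columns"] == columns                 # round-trips intact
```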
dan-zheng/tensorflow | [
"5e04065935920b0a07175283408297e73d2191fb"
] | [
"tensorflow/python/keras/engine/base_layer_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TensorFlow 2.0 layer behavior.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport itertools as it\nimport os\nimport sys\nimport traceback\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.layers import core as legacy_core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import summary_ops_v2\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.summary import summary_iterator\nfrom tensorflow.python.util import nest\n\n\nclass DynamicLayer(base_layer.Layer):\n\n def __init__(self, dynamic=False, **kwargs):\n super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)\n\n def call(self, inputs):\n samples = tensor_array_ops.TensorArray(\n dtype=dtypes.float32, size=array_ops.shape(inputs)[0])\n for idx, sample in enumerate(inputs):\n samples = samples.write(idx, math_ops.square(sample))\n return samples.stack()\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass InvalidLayer(base_layer.Layer):\n\n def call(self, inputs):\n raise ValueError('You did something wrong!')\n\n\nclass BaseLayerTest(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n def test_dynamic_layer(self):\n model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n @keras_parameterized.run_with_all_model_types\n def test_dynamic_layer_error(self):\n 
with self.assertRaisesRegexp(TypeError,\n 'attempting to use Python control flow'):\n model = testing_utils.get_model_from_layers([DynamicLayer()],\n input_shape=(3,))\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n @keras_parameterized.run_with_all_model_types\n def test_dynamic_layer_error_running_in_graph_mode(self):\n with context.graph_mode():\n model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n # But then you cannot run the model since you're in a graph scope.\n with self.assertRaisesRegexp(\n ValueError, 'You must enable eager execution'):\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n\n def test_manual_compute_output_shape(self):\n class BuildCounter(keras.layers.Layer):\n\n def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name\n super(BuildCounter, self).__init__(*args, **kwargs)\n self.build_counter = 0\n\n def build(self, input_shape):\n self.build_counter += 1\n\n def call(self, inputs):\n return inputs\n\n with context.eager_mode():\n layer = BuildCounter(dtype=dtypes.float64)\n output_shape = layer.compute_output_shape((None, 10))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(output_shape.as_list(), [None, 10])\n output_signature = layer.compute_output_signature(\n tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(output_signature.dtype, dtypes.float64)\n self.assertEqual(output_signature.shape.as_list(), [None, 10])\n layer(np.ones((5, 10)))\n self.assertEqual(layer.build_counter, 1)\n\n def test_dynamic_layer_with_deferred_sequential_model(self):\n model = keras.Sequential(\n [DynamicLayer(dynamic=True),\n keras.layers.Dense(3)])\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_nested_dynamic_layers_in_eager_mode(self):\n inputs = keras.Input((3,))\n outputs = DynamicLayer(dynamic=True)(inputs)\n inner_model = keras.Model(inputs, outputs)\n self.assertEqual(inner_model.dynamic, True)\n\n inputs = keras.Input((3,))\n x = DynamicLayer(dynamic=True)(inputs)\n outputs = inner_model(x)\n\n model = keras.Model(inputs, outputs)\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_dynamic_subclassed_model_no_shape_inference(self):\n\n class MyModel(keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = keras.layers.Dense(3)\n self.layer2 = keras.layers.Dense(3)\n\n def call(self, inputs):\n if math_ops.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n self.assertEqual(model.outputs, [None])\n\n def test_dynamic_subclassed_model_with_shape_inference(self):\n\n class MyModel(keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = keras.layers.Dense(3)\n self.layer2 = keras.layers.Dense(3)\n\n def 
call(self, inputs):\n if math_ops.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n def compute_output_shape(self, input_shape):\n return tensor_shape.TensorShape(\n tuple(input_shape[:-1].as_list()) + (3,))\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n self.assertEqual(model.outputs[0].shape.as_list(), [None, 3])\n\n @keras_parameterized.run_all_keras_modes\n def test_add_loss_correctness(self):\n\n class MyLayer(keras.layers.Layer):\n\n def call(self, inputs, training=None):\n self.add_loss(math_ops.reduce_sum(inputs))\n return inputs\n\n inputs = keras.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n self.assertEqual(len(model.losses), 1)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(loss, 2 * 3)\n\n @test_util.run_in_graph_and_eager_modes\n def test_invalid_forward_pass(self):\n inputs = keras.Input((3,))\n with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):\n _ = InvalidLayer()(inputs)\n\n def test_no_legacy_model(self):\n inputs = keras.Input((1,))\n legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')\n legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')\n\n layer = legacy_dense_0(inputs)\n layer = keras.layers.Dense(1)(layer)\n layer = legacy_dense_1(layer)\n\n expected_regex = (r'The following are legacy tf\\.layers\\.Layers:\\n '\n '{}\\n {}'.format(legacy_dense_0, legacy_dense_1))\n\n with self.assertRaisesRegexp(TypeError, expected_regex):\n _ = keras.models.Model(inputs=[inputs], outputs=[layer])\n\n model = keras.models.Model(inputs=[inputs], outputs=[inputs])\n with self.assertRaisesRegexp(TypeError, expected_regex):\n model._insert_layers([legacy_dense_0, legacy_dense_1])\n\n def test_no_legacy_sequential(self):\n layers = [\n keras.layers.Dense(1),\n legacy_core.Dense(1, name='legacy_dense_0')\n ]\n\n expected_regex = r'legacy tf\\.layers\\.Layers:\\n {}'.format(layers[1])\n with self.assertRaisesRegexp(TypeError, expected_regex):\n _ = keras.models.Sequential(layers)\n\n with self.assertRaisesRegexp(TypeError, expected_regex):\n _ = keras.models.Sequential([keras.layers.Input(shape=(4,))] + layers)\n\n model = keras.models.Sequential()\n with self.assertRaisesRegexp(TypeError, expected_regex):\n for l in layers:\n model.add(l)\n\n @keras_parameterized.run_with_all_model_types\n @test_util.run_in_graph_and_eager_modes\n def test_build_with_numpy_data(self):\n model_layers = [\n keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),\n keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')\n ]\n model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))\n model(np.zeros((2, 4), dtype='float32'))\n self.assertTrue(model.built)\n\n @test_util.run_in_graph_and_eager_modes\n def test_default_add_weight(self):\n\n class TestLayer(keras.layers.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n self.default_weight = self.add_weight()\n self.weight_without_name = self.add_weight(shape=(3, 4))\n self.regularized_weight_without_name = self.add_weight(\n shape=(3, 4), regularizer='l2')\n\n layer = TestLayer()\n 
self.assertEqual(layer.default_weight.shape.as_list(), [])\n self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])\n self.assertEqual(layer.default_weight.dtype.name, 'float32')\n self.assertEqual(layer.weight_without_name.dtype.name, 'float32')\n self.assertEqual(len(layer.losses), 1)\n if not context.executing_eagerly():\n # Cannot access tensor.name in eager execution.\n self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_learning_phase_freezing_for_layers(self):\n class LearningPhaseLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return keras.backend.in_train_phase(\n lambda: array_ops.ones_like(inputs),\n lambda: array_ops.zeros_like(inputs))\n\n def get_learning_phase_value():\n model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])\n model._run_eagerly = testing_utils.should_run_eagerly()\n model._experimental_run_tf_function = (\n testing_utils.should_run_tf_function())\n return np.sum(model(np.ones((1, 1))))\n\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test scope.\n with keras.backend.learning_phase_scope(1):\n self.assertEqual(get_learning_phase_value(), 1)\n\n # The effects of the scope end after exiting it.\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test setting.\n keras.backend.set_learning_phase(1)\n self.assertEqual(get_learning_phase_value(), 1)\n keras.backend.set_learning_phase(0)\n self.assertEqual(get_learning_phase_value(), 0)\n\n @keras_parameterized.run_all_keras_modes\n def test_learning_phase_freezing_for_layers_in_predict(self):\n if not (testing_utils.should_run_eagerly() or\n testing_utils.should_run_tf_function()):\n self.skipTest('Predict fails to override the outer learning phase in'\n 'the FuncGraph path.')\n\n class LearningPhaseLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return keras.backend.in_train_phase(\n lambda: array_ops.ones_like(inputs),\n lambda: array_ops.zeros_like(inputs))\n\n def get_learning_phase_value():\n model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])\n model._run_eagerly = testing_utils.should_run_eagerly()\n model._experimental_run_tf_function = (\n testing_utils.should_run_tf_function())\n return np.sum(model.predict(np.ones((1, 1))))\n\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test scope.\n with keras.backend.learning_phase_scope(1):\n self.assertEqual(get_learning_phase_value(), 0)\n\n # The effects of the scope end after exiting it.\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test setting.\n keras.backend.set_learning_phase(1)\n self.assertEqual(get_learning_phase_value(), 0)\n keras.backend.set_learning_phase(0)\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Cannot be enabled with `run_eagerly=True`, see b/123904578\n @test_util.run_all_in_graph_and_eager_modes\n def test_layer_can_return_variable(self):\n\n class ComputeSum(keras.layers.Layer):\n\n def __init__(self):\n super(ComputeSum, self).__init__()\n self.total = variables.Variable(\n initial_value=array_ops.zeros((1, 1)), trainable=False)\n if not context.executing_eagerly():\n keras.backend.get_session().run(self.total.initializer)\n\n def call(self, inputs):\n self.total.assign_add(inputs)\n return self.total\n\n inputs = keras.Input(shape=(1,))\n model = keras.Model(inputs, ComputeSum()(inputs))\n model.predict(np.ones((1, 1)))\n\n def _get_layer_with_training_arg(self):\n\n class TrainingLayer(keras.layers.Layer):\n \"\"\"A layer with a 
`training` argument in a defuned `call`.\"\"\"\n\n @def_function.function\n def call(self, inputs, training=None):\n if training is None:\n training = keras.backend.learning_phase()\n return tf_utils.smart_cond(training,\n lambda: array_ops.ones_like(inputs),\n lambda: array_ops.zeros_like(inputs))\n\n return TrainingLayer()\n\n @keras_parameterized.run_with_all_model_types\n # b/124459427: can't test with `run_eagerly=True` for now.\n @test_util.run_in_graph_and_eager_modes\n def test_training_arg_in_defun(self):\n layer = self._get_layer_with_training_arg()\n model = testing_utils.get_model_from_layers([layer], input_shape=(1,))\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 1.)\n loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(loss, 0.)\n\n # Test that the argument injection performed in `call` is not active\n # when the argument is passed explicitly.\n layer = self._get_layer_with_training_arg()\n inputs = keras.Input(shape=(1,))\n # Pass `training` by name\n outputs = layer(inputs, training=False)\n model = keras.Model(inputs, outputs)\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 0.)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_raw_variable_assignment(self):\n\n class RawVariableLayer(keras.layers.Layer):\n\n def __init__(self, **kwargs):\n super(RawVariableLayer, self).__init__(**kwargs)\n # Test variables in nested structure.\n self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]\n\n def call(self, inputs):\n return inputs * self.var_list[0] * self.var_list[1]['a']\n\n model = testing_utils.get_model_from_layers([RawVariableLayer()],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n x, y = np.ones((10, 10)), np.ones((10, 10))\n # Checks that variables get initialized.\n model.fit(x, y, batch_size=2, epochs=2)\n\n @test_util.run_in_graph_and_eager_modes\n def test_layer_names(self):\n inputs = keras.layers.Input(shape=[2])\n add1 = inputs + inputs\n add2 = keras.layers.Add()([inputs, inputs])\n add3 = inputs + inputs\n add4 = keras.layers.Add()([inputs, inputs])\n model = keras.models.Model(\n inputs=[inputs], outputs=[add1, add2, add3, add4])\n self.assertEqual(\n [l.name for l in model.layers],\n ['input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'])\n\n def test_add_trainable_weight_on_frozen_layer(self):\n\n class TestLayer(keras.layers.Layer):\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(), trainable=True)\n\n def call(self, inputs):\n return self.w * inputs\n\n layer = TestLayer()\n layer.trainable = False\n layer.build(None)\n layer.trainable = True\n self.assertListEqual(layer.trainable_weights, [layer.w])\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_passing_initial_weights_values(self):\n kernel_value = np.random.random((10, 2))\n layer_with_weights = keras.layers.Dense(\n 2, use_bias=False, weights=[kernel_value])\n\n model = testing_utils.get_model_from_layers([layer_with_weights],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n 
experimental_run_tf_function=testing_utils.should_run_tf_function())\n inputs = np.random.random((3, 10))\n out = model.predict(inputs)\n self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)\n self.assertAllClose(out, np.dot(inputs, kernel_value))\n\n @test_util.run_in_graph_and_eager_modes\n def test_set_weights_and_get_weights(self):\n layer = keras.layers.Dense(2)\n layer.build((None, 10))\n kernel = np.random.random((10, 2))\n bias = np.random.random((2,))\n layer.set_weights([kernel, bias])\n weights = layer.get_weights()\n self.assertEqual(len(weights), 2)\n self.assertAllClose(weights[0], kernel)\n self.assertAllClose(weights[1], bias)\n with self.assertRaisesRegexp(\n ValueError, 'but the layer was expecting 2 weights'):\n layer.set_weights([1, 2, 3])\n with self.assertRaisesRegexp(\n ValueError, 'not compatible with provided weight shape'):\n layer.set_weights([kernel.T, bias])\n\n def test_get_config_error(self):\n\n class MyLayer(keras.layers.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayer, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n # `__init__` includes kwargs but `get_config` is not overridden, so\n # an error should be thrown:\n with self.assertRaises(NotImplementedError):\n MyLayer('custom').get_config()\n\n class MyLayerNew(keras.layers.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayerNew, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n def get_config(self):\n config = super(MyLayerNew, self).get_config()\n config['my_kwarg'] = self.my_kwarg\n return config\n\n # Test to make sure that error is not raised if the method call is\n # from an overridden `get_config`:\n self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')\n\n class MyLayerNew2(keras.layers.Layer):\n\n def __init__(self, name='MyLayerName', dtype=None, **kwargs): # pylint:disable=redefined-outer-name\n super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)\n\n # Check that if the kwargs in `__init__` are base layer constructor\n # arguments, no error is thrown:\n self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')\n\n\nclass SymbolicSupportTest(test.TestCase):\n\n def test_using_symbolic_tensors_with_tf_ops(self):\n # Single-input.\n x = keras.Input((3,))\n y = math_ops.square(x)\n self.assertEqual(y.graph, keras.backend.get_graph())\n\n # Multi-inputs.\n x1, x2 = keras.Input((3,)), keras.Input((3,))\n y = array_ops.concat([x1, x2], axis=1)\n self.assertEqual(y.graph, keras.backend.get_graph())\n\n # Mixing Keras symbolic tensors and graph tensors from the same graph works.\n with keras.backend.get_graph().as_default():\n x1 = keras.Input((3,))\n x2 = keras.Input((3,))\n y = math_ops.matmul(x1, x2)\n self.assertEqual(y.graph, keras.backend.get_graph())\n\n # Creating same op type (matmul) multiple times in the Keras graph works.\n x1 = keras.Input((3,))\n x2 = keras.Input((3,))\n y = math_ops.matmul(x1, x2)\n self.assertEqual(y.graph, keras.backend.get_graph())\n\n def test_mixing_eager_and_graph_tensors(self):\n with ops.Graph().as_default():\n x1 = array_ops.ones((3, 3))\n x2 = array_ops.ones((3, 3))\n self.assertIsInstance(x2, ops.EagerTensor)\n with self.assertRaisesRegexp(TypeError, 'Graph tensors'):\n math_ops.matmul(x1, x2)\n\n def test_mixing_numpy_arrays_and_graph_tensors(self):\n with ops.Graph().as_default():\n x1 = array_ops.ones((3, 3))\n x2 = np.ones((3, 3), dtype='float32')\n with self.assertRaisesRegexp(TypeError, 'Graph tensors'):\n math_ops.matmul(x1, 
x2)\n\n @test_util.run_in_graph_and_eager_modes\n def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):\n x1 = keras.Input((3,))\n x2 = array_ops.ones((3, 3))\n y = math_ops.matmul(x1, x2)\n self.assertEqual(y.graph, keras.backend.get_graph())\n fn = keras.backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):\n x1 = keras.Input((3,))\n x2 = np.ones((3, 3), dtype='float32')\n y = math_ops.matmul(x1, x2)\n self.assertEqual(y.graph, keras.backend.get_graph())\n fn = keras.backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def test_reraising_exception(self):\n # When layer is not dynamic, we have some pattern matching during exception\n # handling to detect when the user is trying to use python control flow.\n # When an exception is thrown but the pattern doesn't match, we want to\n # preserve the originating stack trace. An early implementation of this\n # logic lost the stack trace. We test the correct behavior here.\n\n class TypeErrorLayer(base_layer.Layer):\n\n def call(self, inputs):\n def easily_identifiable_name():\n raise TypeError('Non-matching TypeError message.')\n easily_identifiable_name()\n\n inputs = keras.Input((3,))\n\n try:\n _ = TypeErrorLayer()(inputs)\n except TypeError as e:\n if hasattr(e, 'ag_error_metadata'):\n self.assertIn('easily_identifiable_name', str(e))\n # See ErrorMetadataBase in autograph/pyct/errors.py\n # Topmost frame corresponds to `call` itself.\n function_name = e.ag_error_metadata.translated_stack[-2].function_name\n else:\n tb = traceback.extract_tb(sys.exc_info()[2])\n last_entry = tb[-1]\n function_name = last_entry[2]\n self.assertEqual(function_name, 'easily_identifiable_name')\n\n @test_util.run_in_graph_and_eager_modes\n def test_summaries_in_tf_function(self):\n if not context.executing_eagerly():\n return\n\n class MyLayer(keras.layers.Layer):\n\n def call(self, inputs):\n summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))\n return inputs\n\n tmp_dir = self.get_temp_dir()\n writer = summary_ops_v2.create_file_writer_v2(tmp_dir)\n with writer.as_default(), summary_ops_v2.always_record_summaries():\n my_layer = MyLayer()\n x = array_ops.ones((10, 10))\n\n def my_fn(x):\n return my_layer(x)\n\n _ = my_fn(x)\n\n event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))\n self.assertLen(event_file, 1)\n event_file = event_file[0]\n tags = set()\n for e in summary_iterator.summary_iterator(event_file):\n for val in e.summary.value:\n tags.add(val.tag)\n self.assertEqual(set(['my_layer/mean']), tags)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NestedTrackingTest(test.TestCase):\n\n def test_nested_layer_variable_tracking(self):\n # Test that variables from nested sublayers are\n # being tracked by subclassed layers.\n\n class MyLayer(keras.layers.Layer):\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.dense1 = keras.layers.Dense(1)\n self.dense2 = keras.layers.BatchNormalization()\n\n def build(self, input_shape):\n self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())\n self.v2 = variables.Variable(\n name='v2',\n initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),\n 
trainable=False)\n\n def call(self, inputs):\n x = self.dense1(inputs) + self.dense2(inputs)\n return x + self.v1 + self.v2\n\n layer = MyLayer()\n inputs = keras.Input((1,))\n _ = layer(inputs)\n\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 5)\n self.assertEqual(len(layer.non_trainable_weights), 3)\n\n layer.dense1.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 5)\n\n layer.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 0)\n self.assertEqual(len(layer.non_trainable_weights), 8)\n self.assertEqual(\n set([layer.dense1, layer.dense2, layer.v1, layer.v2]),\n set([obj for unused_name, obj in layer._checkpoint_dependencies]))\n\n def test_nested_layer_updates_losses_tracking(self):\n # Test that updates and losses from nested sublayers are\n # being tracked by subclassed layers.\n\n class UpdateAndLossLayer(keras.layers.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def call(self, inputs):\n self.add_loss(math_ops.reduce_sum(inputs))\n self.add_update(state_ops.assign_add(self.v1, 1))\n return inputs + 1\n\n class MyLayer(keras.layers.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.ul1 = UpdateAndLossLayer()\n self.ul2 = UpdateAndLossLayer()\n\n def call(self, inputs):\n self.add_loss(math_ops.reduce_sum(inputs))\n self.add_update(state_ops.assign_add(self.v1, 1))\n x = self.ul1(inputs)\n return self.ul2(x)\n\n layer = MyLayer()\n\n if context.executing_eagerly():\n inputs = array_ops.ones((3, 1))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n self.assertLen(layer.get_losses_for(None), 3)\n else:\n inputs = keras.Input((1,))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n self.assertEqual(len(layer.updates), 3)\n self.assertLen(layer.get_losses_for(None), 3)\n\n def test_attribute_reassignment(self):\n l = keras.layers.Layer()\n l.a = keras.layers.Layer()\n l.a = []\n l.a = variables.Variable(1.)\n l.a = keras.layers.Layer()\n last_assignment = keras.layers.Layer()\n l.a = last_assignment\n l.b = variables.Variable(1.)\n del l.b\n l.c = keras.layers.Layer()\n del l.c\n l.d = last_assignment\n del l.d\n self.assertEqual([last_assignment], l._layers)\n self.assertEqual([], l.trainable_weights)\n self.assertEqual([], l.non_trainable_weights)\n self.assertEqual([], l.weights)\n del l.a\n self.assertEqual([], l._layers)\n\n def test_assign_op_not_tracked_as_variable(self):\n\n class LayerWithAssignAttr(keras.layers.Layer):\n\n def build(self, input_shape):\n self.v = variables.Variable(1.)\n self.v_assign = self.v.assign_add(2.)\n\n layer = LayerWithAssignAttr()\n layer.build((10, 10))\n\n self.assertEqual([layer.v], layer.variables)\n\n def test_layer_class_not_tracked_as_sublayer(self):\n # See https://github.com/tensorflow/tensorflow/issues/27431 for details.\n\n class LayerWithClassAttribute(keras.layers.Layer):\n\n def __init__(self):\n super(LayerWithClassAttribute, self).__init__()\n self.layer_fn = keras.layers.Dense\n\n layer = LayerWithClassAttribute()\n self.assertEmpty(layer.variables)\n self.assertEmpty(layer.submodules)\n\n def test_layer_call_fn_args(self):\n\n class NonDefunLayer(keras.layers.Layer):\n\n def call(self, inputs, a, mask, b=None, training=None):\n return inputs\n\n class 
DefunLayer(keras.layers.Layer):\n\n @def_function.function\n def call(self, x, mask, a, training=None, b=None):\n return x\n\n nondefun_layer = NonDefunLayer()\n self.assertEqual(nondefun_layer._call_fn_args,\n ['inputs', 'a', 'mask', 'b', 'training'])\n defun_layer = DefunLayer()\n self.assertEqual(defun_layer._call_fn_args,\n ['x', 'mask', 'a', 'training', 'b'])\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NameScopingTest(keras_parameterized.TestCase):\n\n def test_name_scope_layer(self):\n x = keras.backend.placeholder(shape=(10, 10))\n layer = keras.layers.Dense(10, name='MyName')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName/kernel:0')\n\n def test_name_scope_sublayer(self):\n\n class NameScopeTracker(keras.layers.Layer):\n\n def call(self, inputs):\n self.active_name_scope = ops.get_name_scope()\n return inputs\n\n x = keras.backend.placeholder(shape=(10, 10))\n sublayer = NameScopeTracker(name='Sublayer')\n layer = keras.layers.Dense(10, activation=sublayer, name='MyName2')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName2/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')\n self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')\n\n def test_name_scope_tf_tensor(self):\n x = ops.convert_to_tensor(np.ones((10, 10)))\n layer = keras.layers.Dense(\n 10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName3/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass AutographControlFlowTest(keras_parameterized.TestCase):\n\n def test_disabling_in_context_is_matched(self):\n\n test_obj = self\n\n class MyLayer(keras.layers.Layer):\n\n def call(self, inputs, training=None):\n with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):\n if constant_op.constant(False):\n return inputs * 1.\n return inputs * 0.\n\n @def_function.function(autograph=False)\n def test_fn():\n return MyLayer()(constant_op.constant([[1., 2., 3.]]))\n\n test_fn()\n\n def test_if_training_pattern_output(self):\n\n class MyLayer(keras.layers.Layer):\n\n def call(self, inputs, training=None):\n if training:\n return inputs * 1.\n return inputs * 0.\n\n inputs = keras.Input((3,))\n outputs = MyLayer()(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 0.)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 1.)\n\n def test_if_training_pattern_loss(self):\n\n class MyLayer(keras.layers.Layer):\n\n def call(self, inputs, training=None):\n if training:\n loss = math_ops.reduce_sum(inputs)\n else:\n loss = 0.\n self.add_loss(loss)\n return inputs\n\n inputs = keras.Input((3,))\n outputs = MyLayer()(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 2 * 3)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 0)\n\n def test_if_training_pattern_metric(self):\n\n class 
MyLayer(keras.layers.Layer):\n\n def call(self, inputs, training=None):\n if training:\n metric = math_ops.reduce_sum(inputs)\n else:\n metric = 0.\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n\n inputs = keras.Input((3,))\n outputs = MyLayer()(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n _, train_metric = model.train_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n self.assertEqual(train_metric, 2 * 3)\n _, test_metric = model.test_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n self.assertEqual(test_metric, 0)\n\n def test_if_training_pattern_update(self):\n\n class MyLayer(keras.layers.Layer):\n\n def build(self, input_shape):\n self.counter = self.add_weight(\n shape=(), trainable=False, initializer='zeros')\n\n def call(self, inputs, training=None):\n if training:\n increment = 1.\n else:\n increment = 0.\n self.counter.assign_add(increment)\n return inputs\n\n inputs = keras.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(keras.backend.get_value(layer.counter), 1.)\n\n def test_conditional_updates_in_call(self):\n\n class MyLayer(keras.layers.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=testing_utils.should_run_eagerly())\n\n def build(self, input_shape):\n self.counter = self.add_weight(\n shape=(), trainable=False, initializer='zeros')\n\n def call(self, inputs, training=None):\n if training:\n z = math_ops.reduce_sum(inputs)\n self.add_update(lambda: self.counter.assign_add(z))\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n if testing_utils.should_run_eagerly():\n inputs = keras.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(keras.backend.get_value(layer.counter), 6.)\n else:\n # TODO(fchollet): support the same workflow in graph mode.\n with self.assertRaisesRegexp(RuntimeError,\n '`add_update` in a control flow branch'):\n layer = MyLayer()\n layer(keras.Input((3,)))\n _ = layer.updates\n\n def test_conditional_losses_in_call(self):\n\n class MyLayer(keras.layers.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=testing_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_loss(math_ops.reduce_sum(inputs))\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n if testing_utils.should_run_eagerly():\n inputs = keras.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(loss, 2 * 3)\n else:\n with self.assertRaisesRegexp(RuntimeError,\n '`add_loss` in a control flow branch'):\n layer = 
MyLayer()(keras.Input((3,)))\n\n def test_conditional_callable_losses(self):\n model = keras.Sequential([\n keras.layers.Dense(\n 1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))\n ])\n model._run_eagerly = testing_utils.should_run_eagerly()\n model._experimental_run_tf_function = testing_utils.should_run_tf_function()\n\n def assert_graph(t):\n if not context.executing_eagerly():\n self.assertEqual(t.graph, ops.get_default_graph())\n\n @def_function.function\n def get_losses(t):\n if t < 0:\n return math_ops.reduce_sum(model.losses) * t\n else:\n return math_ops.reduce_sum(model.losses)\n\n assert_graph(get_losses(constant_op.constant(2.)))\n assert_graph(get_losses(constant_op.constant(0.5)))\n\n def test_conditional_metrics_in_call(self):\n\n class MyLayer(keras.layers.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=testing_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_metric(math_ops.reduce_sum(inputs),\n name='sum',\n aggregation='mean')\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n if testing_utils.should_run_eagerly():\n inputs = keras.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = keras.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n history = model.fit(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(history.history['sum'][-1], 2 * 3)\n else:\n # TODO(fchollet): support the same workflow in graph mode.\n with self.assertRaisesRegexp(RuntimeError,\n '`add_metric` in a control flow branch'):\n layer = MyLayer()(keras.Input((3,)))\n\n def test_conditional_activity_regularizer_in_call(self):\n\n class TestModel(keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=testing_utils.should_run_eagerly())\n self.layer = keras.layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n if math_ops.greater(math_ops.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n x = np.ones(shape=(10, 1))\n y = np.ones(shape=(10, 2))\n\n if testing_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with self.assertRaisesRegexp(\n RuntimeError, '`activity_regularizer` in a control flow branch'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n def test_conditional_activity_regularizer_with_wrappers_in_call(self):\n\n class TestModel(keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=testing_utils.should_run_eagerly())\n self.layer = keras.layers.TimeDistributed(\n keras.layers.Dense(2, activity_regularizer='l2'),\n input_shape=(3, 4))\n\n def call(self, x, training=None):\n if math_ops.greater(math_ops.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n x = np.ones(shape=(10, 3, 4))\n y = np.ones(shape=(10, 3, 2))\n\n if testing_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with 
self.assertRaisesRegexp(\n RuntimeError, '`activity_regularizer` in a control flow branch'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n\nclass AddLayer(keras.layers.Layer):\n \"\"\"A layer which adds it's input to a variable.\n\n Useful for testing a layer with a variable\n \"\"\"\n\n def build(self, _):\n self.v = self.add_weight('v', (), initializer='ones')\n self.built = True\n\n def call(self, inputs):\n return inputs + self.v\n\n\nclass IdentityLayer(keras.layers.Layer):\n \"\"\"A layer that returns it's input.\n\n Useful for testing a layer without a variable.\n \"\"\"\n\n def call(self, inputs):\n return inputs\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DTypeTest(keras_parameterized.TestCase):\n\n # This class only have tests relating to layer.dtype. Tests for dtype policies\n # are in mixed_precision/experimental/keras_test.py\n\n def _const(self, dtype):\n return array_ops.constant(1, dtype=dtype)\n\n @testing_utils.enable_v2_dtype_behavior\n def test_dtype_defaults_to_floatx(self):\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float32')\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float32') # dtype should not change\n\n try:\n backend.set_floatx('float64')\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float64')\n finally:\n backend.set_floatx('float32')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_passing_dtype_to_constructor(self):\n layer = IdentityLayer(dtype='float64')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n layer = IdentityLayer(dtype='int32')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'int32')\n\n layer = IdentityLayer(dtype=dtypes.float64)\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def input_cast_to_dtype(self):\n layer = AddLayer()\n\n # Input should be cast to layer.dtype, so output should also be layer.dtype\n self.assertEqual(layer(self._const('float64')).dtype, 'float32')\n\n layer = AddLayer(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float64')\n\n # Test inputs are not casted if layer.dtype is not floating-point\n layer = IdentityLayer(dtype='int32')\n self.assertEqual(layer(self._const('float64')).dtype, 'float64')\n\n # Test inputs are not casted if the inputs are not floating-point\n layer = IdentityLayer(dtype='float32')\n self.assertEqual(layer(self._const('int32')).dtype, 'int32')\n\n # Test Numpy arrays are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')\n\n # Test Python floats are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(1.).dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def multiple_inputs_cast_to_dtype(self):\n\n class MultiIdentityLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return [array_ops.identity(x) for x in inputs]\n\n # Testing layer with default dtype of float32\n layer = MultiIdentityLayer()\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float32')\n self.assertEqual(y.dtype, 'float32')\n\n # Test passing dtype to the constructor\n layer = MultiIdentityLayer(dtype='float64')\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float64')\n\n # Test several non-floating point types\n layer = MultiIdentityLayer(dtype='float64')\n x, y, z, w = 
layer([self._const('float16'), self._const('bool'),\n self._const('float64'), self._constant('complex64')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'bool')\n self.assertEqual(z.dtype, 'float64')\n self.assertEqual(w.dtype, 'complex64')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_extra_args_and_kwargs_not_casted(self):\n\n class IdentityLayerWithArgs(keras.layers.Layer):\n\n def call(self, inputs, *args, **kwargs):\n return nest.flatten([inputs, args, kwargs])\n\n layer = IdentityLayerWithArgs(dtype='float64')\n x, y, z = layer(self._const('float16'), self._const('float16'),\n kwarg=self._const('float16'))\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float16')\n self.assertEqual(z.dtype, 'float16')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_layer_without_autocast(self):\n\n class IdentityLayerWithoutAutocast(IdentityLayer):\n\n def __init__(self, *args, **kwargs):\n kwargs['experimental_autocast'] = False\n super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)\n\n layer = IdentityLayerWithoutAutocast(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_dtype_warnings(self):\n # Test a layer warns when it casts inputs.\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n self.assertRegexpMatches(\n str(mock_warn.call_args),\n \".*from dtype float64 to the layer's dtype of float32.*\"\n \"The layer has dtype float32 because.*\")\n\n # Test a layer does not warn a second time\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n # Test a new layer can warn even if a different layer already warned\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n self.assertRegexpMatches(\n str(mock_warn.call_args),\n \".*from dtype float64 to the layer's dtype of float32.*\"\n \"The layer has dtype float32 because.*\")\n\n # Test a layer does not warn if a dtype is passed\n layer = IdentityLayer(dtype='float32')\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n # Test a layer does not warn if a Policy is set:\n with policy.policy_scope('float32'):\n layer = IdentityLayer()\n with test.mock.patch.object(tf_logging, 'warn') as mock_warn:\n layer(self._const('float64'))\n mock_warn.assert_not_called()\n\n @testing_utils.enable_v2_dtype_behavior\n def test_compute_output_signature(self):\n\n class IdentityLayerWithOutputShape(IdentityLayer):\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n layer = IdentityLayerWithOutputShape(dtype='float64')\n output_signature = layer.compute_output_signature(\n tensor_spec.TensorSpec(shape=(), dtype='float32'))\n self.assertEqual(output_signature.shape, ())\n self.assertEqual(output_signature.dtype, 'float64')\n\n @testing_utils.enable_v2_dtype_behavior\n def test_passing_non_tensor(self):\n layer = IdentityLayer()\n x = object()\n y = layer(x) # Layer should not cast 'x', as it's not a tensor\n self.assertIs(x, y)\n\n @testing_utils.disable_v2_dtype_behavior\n def test_v1_behavior(self):\n # Test dtype defaults to None and inferred from input\n layer = IdentityLayer()\n self.assertIsNone(layer.dtype)\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float64')\n\n # Test 
layer does not cast to dtype\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n_LAYERS_TO_TEST = [\n (keras.layers.Dense, (1,), collections.OrderedDict(units=[1])),\n (keras.layers.Activation, (2, 2),\n collections.OrderedDict(activation=['relu'])),\n (keras.layers.Dropout, (16,), collections.OrderedDict(rate=[0.25])),\n (keras.layers.BatchNormalization, (8, 8, 3), collections.OrderedDict(\n axis=[3], center=[True, False], scale=[True, False])),\n (keras.layers.Conv1D, (8, 8), collections.OrderedDict(\n filters=[1], kernel_size=[1, 3], strides=[1, 2],\n padding=['valid', 'same'], use_bias=[True, False],\n kernel_regularizer=[None, 'l2'])),\n (keras.layers.Conv2D, (8, 8, 3), collections.OrderedDict(\n filters=[1], kernel_size=[1, 3], strides=[1, 2],\n padding=['valid', 'same'], use_bias=[True, False],\n kernel_regularizer=[None, 'l2'])),\n (keras.layers.LSTM, (8, 8), collections.OrderedDict(\n units=[1],\n activation=[None, 'relu'],\n kernel_regularizer=[None, 'l2'],\n dropout=[0, 0.5],\n stateful=[True, False],\n unroll=[True, False])),\n]\n\nOUTPUT_TEST_CASES = []\nfor layer_type, inp_shape, arg_dict in _LAYERS_TO_TEST:\n arg_combinations = [[(k, i) for i in v] for k, v in arg_dict.items()] # pylint: disable=g-complex-comprehension\n for arguments in it.product(*arg_combinations):\n name = '_{}_{}'.format(layer_type.__name__,\n '_'.join('{}_{}'.format(k, v) for k, v in arguments))\n OUTPUT_TEST_CASES.append(\n (name, layer_type, inp_shape, {k: v for k, v in arguments}))\n\n\nclass OutputTypeTest(keras_parameterized.TestCase):\n \"\"\"Test that layers and models produce the correct tensor types.\"\"\"\n\n # In v1 graph there are only symbolic tensors.\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n @parameterized.named_parameters(*OUTPUT_TEST_CASES)\n def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):\n layer = layer_to_test(**layer_kwargs)\n\n input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)\n layer_result = layer(input_data)\n\n inp = keras.layers.Input(shape=input_shape, batch_size=2)\n model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))\n model_result = model(input_data)\n\n for x in [layer_result, model_result]:\n if not isinstance(x, ops.Tensor):\n raise ValueError('Tensor or EagerTensor expected, got type {}'\n .format(type(x)))\n\n if isinstance(x, ops.EagerTensor) != context.executing_eagerly():\n expected_type = (ops.EagerTensor if context.executing_eagerly()\n else ops.Tensor)\n raise ValueError('Expected type {}, got type {}'\n .format(expected_type, type(x)))\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n"
] | [
[
"numpy.ones",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.layers.Add",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.testing_utils.get_model_from_layers",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.keras.backend.function",
"tensorflow.python.summary.summary_iterator.summary_iterator",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.keras.mixed_precision.experimental.policy.policy_scope",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.keras.layers.ReLU",
"tensorflow.python.keras.Model",
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.keras.Input",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.summary_ops_v2.always_record_summaries",
"numpy.zeros",
"numpy.dot",
"tensorflow.python.keras.backend.set_floatx",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.summary_ops_v2.create_file_writer_v2",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.keras.layers.Layer",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop",
"tensorflow.python.keras.backend.learning_phase_scope",
"tensorflow.python.keras.regularizers.l2",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.ones",
"numpy.matmul",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.keras.models.Model",
"numpy.random.random",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.keras.backend.get_session",
"numpy.array",
"tensorflow.python.keras.backend.set_learning_phase",
"tensorflow.python.keras.testing_utils.should_run_tf_function",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.platform.test.mock.patch.object"
]
] |
CRLarry/StyleTransfer | [
"ddea81f8570232743bd7b8dbd569cf77f7cb5a28"
] | [
"royg_train.py"
] | [
"import math, random, time, pygame, sys\nfrom pygame.locals import *\nfrom PIL import Image\nimport numpy as np\nimport json\n\nprint(\"i tried\")\n\ndef random_select(distribution, color, iteration):\n random_r = int(np.random.normal(color[0], distribution[0] * 60 / (20*(iteration+1))))\n random_g = int(np.random.normal(color[1], distribution[1] * 60 / (20*(iteration+1))))\n random_b = int(np.random.normal(color[2], distribution[2] * 60 / (20*(iteration+1))))\n if (random_r > 255):\n random_r = 255\n if (random_g > 255):\n random_g = 255\n if (random_b > 255):\n random_b = 255\n if (random_r < 0):\n random_r = 0\n if (random_g < 0):\n random_g = 0\n if (random_b < 0):\n random_b = 0\n return (random_r, random_g, random_b)\n\ndef generate_color(input_key, input_color, iteration):\n\treturn (random_select(color_model[input_key], input_color, iteration))\n\ndef generate_key(input_color):\n key = int(input_color[0]/32+1)*100 + int(input_color[1]/32+1)*10 + int(input_color[2]/32+1)\n return (key)\n\nwindow_size = 1024\nnum_iterations = 2\nvalid_input = False\ngrid_colors = []\n\nif __name__ == \"__main__\":\n if (len(sys.argv) == 2):\n training_image = sys.argv[1]\n\n im = Image.open(training_image)\n pix = im.load()\n\n rgb_values = []\n color_model = {}\n\n for x in range(im.size[0]):\n these_rgbs = []\n for y in range(im.size[1]):\n these_rgbs.append(pix[x,y])\n rgb_values.append(these_rgbs)\n\n for x in range(im.size[0] / 2):\n for y in range(im.size[1] / 2):\n rgb_mean = []\n rgb_mean.append(sum([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]]) / 4)\n rgb_mean.append(sum([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]]) / 4)\n rgb_mean.append(sum([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]]) / 4)\n\n rgb_std = []\n rgb_std.append(int(np.std([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]])))\n rgb_std.append(int(np.std([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]])))\n rgb_std.append(int(np.std([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]])))\n\n key = int(rgb_mean[0]/32+1)*100 + int(rgb_mean[1]/32+1)*10 + int(rgb_mean[2]/32+1)\n\n if (key not in color_model.keys()):\n color_model[key] = [rgb_std[0], rgb_std[1], rgb_std[2], 1]\n\n else:\n color_model[key] = [(color_model[key][0]*color_model[key][3]+rgb_std[0])/(color_model[key][3]+1), (color_model[key][1]*color_model[key][3]+rgb_std[1])/(color_model[key][3]+1), (color_model[key][2]*color_model[key][3]+rgb_std[2])/(color_model[key][3]+1), color_model[key][3]+1]\n\n for x in range(8):\n for y in range(8):\n for z in range(8):\n key = (x+1)*100 + (y+1)*10 + (z+1)\n if (key not in color_model.keys()):\n color_model[key] = [int(random.uniform(8, 15)), int(random.uniform(8, 15)), int(random.uniform(8, 15)), 1]\n if (color_model[key][0] < 6):\n color_model[key][0] = int(random.uniform(8, 15))\n if (color_model[key][1] < 6):\n color_model[key][1] = int(random.uniform(8, 15))\n if (color_model[key][2] < 6):\n color_model[key][2] = int(random.uniform(8, 15))\n\n valid_input = True\n\n if(valid_input):\n for i in range(im.size[0]):\n row_colors = []\n for j in range(im.size[1]):\n row_colors.append(pix[i,j])\n grid_colors.append(row_colors)\n\n for i in range(num_iterations):\n 
new_grid_colors = []\n grid_colors_list = []\n for j in range(len(grid_colors[0]) * 2):\n row_colors = []\n for k in range(len(grid_colors) * 2):\n row_colors.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))\n grid_colors_list.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))\n new_grid_colors.append(row_colors)\n grid_colors = new_grid_colors\n # img = Image.fromarray(grid_colors, 'RGB')\n im2 = Image.new('RGB',(len(grid_colors[0]),len(grid_colors)))\n im2.putdata(grid_colors_list)\n im2.save(\"up20.jpg\")\n"
] | [
[
"numpy.random.normal",
"numpy.std"
]
] |
dominique120/12-steps-navier-stokes | [
"3e195bf7f7895f83f5f2248ef48dc13b76e8b5de"
] | [
"l3/plot.py"
] | [
"#!/usr/bin/env python\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmatplotlib.rcParams[\"font.family\"] = \"Serif\"\nmatplotlib.rcParams[\"font.size\"] = 10\nmatplotlib.rcParams[\"axes.labelsize\"] = 10\nmatplotlib.rcParams[\"xtick.labelsize\"] = 10\nmatplotlib.rcParams[\"ytick.labelsize\"] = 10\nmatplotlib.rcParams[\"legend.fontsize\"] = 10\n\nfig = plt.figure(facecolor=\"white\")\nax = fig.gca()\nax.grid()\nax.set_axisbelow(True)\nax.set_xlabel(\"timestep\")\nax.set_title(\"Plot of u over time\")\n\nx = np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.5403539514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])\ny = 
np.array([0.2727630972961541E+01,0.2771311269919911E+01,0.2814989285728558E+01,0.2858665849416731E+01,0.2902342189881139E+01,0.2946020124173951E+01,0.2989702300073073E+01,0.3033392493052979E+01,0.3077095949208672E+01,0.3120819754014078E+01,0.3164573192804217E+01,0.3208368054247055E+01,0.3252218815413303E+01,0.3296142639622408E+01,0.3340159119445302E+01,0.3384289709865549E+01,0.3428556821944811E+01,0.3472982584474947E+01,0.3517587326469731E+01,0.3562387880944926E+01,0.3607395852618360E+01,0.3652616021029425E+01,0.3698045059580436E+01,0.3743670736415854E+01,0.3789471724920397E+01,0.3835418093903271E+01,0.3881472477500543E+01,0.3927591851755142E+01,0.3973729778475908E+01,0.4019838926034867E+01,0.4065873647669411E+01,0.4111792394000463E+01,0.4157559757884302E+01,0.4203147993161763E+01,0.4248537908375252E+01,0.4293719104179188E+01,0.4338689590214909E+01,0.4383454875298046E+01,0.4428026667143412E+01,0.4472421340415166E+01,0.4516658333683488E+01,0.4560758619056785E+01,0.4604743357507668E+01,0.4648632814346878E+01,0.4692445569290600E+01,0.4736198019615245E+01,0.4779904146861352E+01,0.4823575499325555E+01,0.4867221334189969E+01,0.4910848863158982E+01,0.4954463551611633E+01,0.4998069430935068E+01,0.5041669394396616E+01,0.5085265456440628E+01,0.5128858961580264E+01,0.5172450729440230E+01,0.5216041112137861E+01,0.5259629908416969E+01,0.5303216001391483E+01,0.5346796407577470E+01,0.5390364016956128E+01,0.5433902378991060E+01,0.5477373796984951E+01,0.5520692266994955E+01,0.5563662148157763E+01,0.5605839543752792E+01,0.5646220063907935E+01,0.5682539434358074E+01,0.5709723746298654E+01,0.5716534126277256E+01,0.5678680622132141E+01,0.5546490436649654E+01,0.5230962954575717E+01,0.4621610920896421E+01,0.3723767827144663E+01,0.2832739161616575E+01,0.2256379481270987E+01,0.1991598349454642E+01,0.1903149542455375E+01,0.1893699549180197E+01,0.1916482260037323E+01,0.1951969308086713E+01,0.1992407559120753E+01,0.2034770635503815E+01,0.2077882429362218E+01,0.2121285847116006E+01,0.2164802982375364E+01,0.2208364483935747E+01,0.2251943284280958E+01,0.2295528801406756E+01,0.2339116851026652E+01,0.2382705607390911E+01,0.2426293674842139E+01,0.2469878109399315E+01,0.2513449360728478E+01,0.2556974307303974E+01,0.2600337207538654E+01,0.2643133334588122E+01,0.2683947826158554E+01,0.2717840319099818E+01,0.2727630972961541E+01])\n\nax.plot(x,y,\"b-o\",linewidth=1,markersize=3,label=\"value of u\")\n\nx = 
np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.5403539514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])\ny = 
np.array([0.2736023075220947E+01,0.2779837164545836E+01,0.2823649045509257E+01,0.2867459600164973E+01,0.2911270141699394E+01,0.2955082615916199E+01,0.2998899859719867E+01,0.3042725915794919E+01,0.3086566393194455E+01,0.3130428850827616E+01,0.3174323165831663E+01,0.3218261833478449E+01,0.3262260132502555E+01,0.3306336083104996E+01,0.3350510127978347E+01,0.3394804482298916E+01,0.3439242127774091E+01,0.3483845467038330E+01,0.3528634703635678E+01,0.3573626062661065E+01,0.3618830009499543E+01,0.3664249650698246E+01,0.3709879505336173E+01,0.3755704814144340E+01,0.3801701508065813E+01,0.3847836893073130E+01,0.3894071032297831E+01,0.3940358730093159E+01,0.3986651955855555E+01,0.4032902497084348E+01,0.4079064607364045E+01,0.4125097418387960E+01,0.4170966914845689E+01,0.4216647322613483E+01,0.4262121827104219E+01,0.4307382610971990E+01,0.4352430269224261E+01,0.4397272716621099E+01,0.4441923740614247E+01,0.4486401369648815E+01,0.4530726221531711E+01,0.4574919973083936E+01,0.4619004056115172E+01,0.4662998642689007E+01,0.4706921941299771E+01,0.4750789790286371E+01,0.4794615508938744E+01,0.4838409951549534E+01,0.4882181704496955E+01,0.4925937369356523E+01,0.4969681883387789E+01,0.5013418839774710E+01,0.5057150781228563E+01,0.5100879449862473E+01,0.5144605981558732E+01,0.5188331031510512E+01,0.5232054803586364E+01,0.5275776916902560E+01,0.5319495949105908E+01,0.5363208279897092E+01,0.5406905366405161E+01,0.5450567464917623E+01,0.5494149280945163E+01,0.5537547298853238E+01,0.5580525607569766E+01,0.5622547965846030E+01,0.5662399018478118E+01,0.5697335399227216E+01,0.5721207407206272E+01,0.5720417383492038E+01,0.5665776539520547E+01,0.5498721631018602E+01,0.5119674798334109E+01,0.4425781384204654E+01,0.3486434650868969E+01,0.2650763109373032E+01,0.2160209517510828E+01,0.1951643566665702E+01,0.1890046900079447E+01,0.1891964276681970E+01,0.1919388886466590E+01,0.1956790527577951E+01,0.1998063161314609E+01,0.2040835692245308E+01,0.2084189902896408E+01,0.2127769988406669E+01,0.2171437866684961E+01,0.2215139874588584E+01,0.2258855136134518E+01,0.2302575516435228E+01,0.2346297799758990E+01,0.2390020545982250E+01,0.2433742519833124E+01,0.2477460863766740E+01,0.2521166124486849E+01,0.2564825375231745E+01,0.2608323275028774E+01,0.2651255604219204E+01,0.2692206176828820E+01,0.2726219167417408E+01,0.2736023075220947E+01])\n\nax.plot(x,y,\"b-o\",linewidth=1,markersize=3,label=\"value of un\")\n\nx = 
np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.5403539514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])\ny = 
np.array([0.2778119282693222E+01,0.2821757879554102E+01,0.2865396476414983E+01,0.2909035073275863E+01,0.2952673670136744E+01,0.2996312266997624E+01,0.3039950863858505E+01,0.3083589460719385E+01,0.3127228057580266E+01,0.3170866654441146E+01,0.3214505251302026E+01,0.3258143848162907E+01,0.3301782445023787E+01,0.3345421041884667E+01,0.3389059638745548E+01,0.3432698235606428E+01,0.3476336832467309E+01,0.3519975429328189E+01,0.3563614026189069E+01,0.3607252623049950E+01,0.3650891219910831E+01,0.3694529816771711E+01,0.3738168413632591E+01,0.3781807010493472E+01,0.3825445607354352E+01,0.3869084204215233E+01,0.3912722801076113E+01,0.3956361397936993E+01,0.3999999994797874E+01,0.4043638591658754E+01,0.4087277188519635E+01,0.4130915785380515E+01,0.4174554382241396E+01,0.4218192979102276E+01,0.4261831575963156E+01,0.4305470172824037E+01,0.4349108769684917E+01,0.4392747366545797E+01,0.4436385963406678E+01,0.4480024560267559E+01,0.4523663157128439E+01,0.4567301753989319E+01,0.4610940350850200E+01,0.4654578947711080E+01,0.4698217544571961E+01,0.4741856141432841E+01,0.4785494738293721E+01,0.4829133335154602E+01,0.4872771932015482E+01,0.4916410528876363E+01,0.4960049125737243E+01,0.5003687722598123E+01,0.5047326319459004E+01,0.5090964916319884E+01,0.5134603513180765E+01,0.5178242110041645E+01,0.5221880706902526E+01,0.5265519303763406E+01,0.5309157900624286E+01,0.5352796497485167E+01,0.5396435094346045E+01,0.5440073691206913E+01,0.5483712288067701E+01,0.5527350884927927E+01,0.5570989481784174E+01,0.5614628078612207E+01,0.5658266675240245E+01,0.5701905270450581E+01,0.5745543855611327E+01,0.5789182369533984E+01,0.5832820378474495E+01,0.5876454807775165E+01,0.5920063862467133E+01,0.5963493056546985E+01,0.6005647845264028E+01,0.6038797098376929E+01,0.6009502348067070E+01,0.5598786339877734E+01,0.3999997337339063E+01,0.2401211415134112E+01,0.1990497346934744E+01,0.1961202951946962E+01,0.1994352257161830E+01,0.2036507053269433E+01,0.2079936248392683E+01,0.2123545303231861E+01,0.2167179732553298E+01,0.2210817741496738E+01,0.2254456255419809E+01,0.2298094840580612E+01,0.2341733435790958E+01,0.2385372032418996E+01,0.2429010629247029E+01,0.2472649226103276E+01,0.2516287822963503E+01,0.2559926419824292E+01,0.2603565016685159E+01,0.2647203613546037E+01,0.2690842210406918E+01,0.2734480807267798E+01,0.2778119404128678E+01])\n\nax.plot(x,y,\"b-o\",linewidth=1,markersize=3,label=\"value of u_analytical\")\n\nax.legend(loc=\"best\")\n\nplt.savefig(\"plot.png\", dpi=320)\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig"
]
] |
romanbird/jeopardy-bot | [
"d47600d9261fefcb5f08d699ddf8b5fdcd072da1"
] | [
"assets/csv_counter.py"
] | [
"import csv\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import Counter\ndbRead = open('db.csv', \"r\", newline='', encoding='utf8')\ndb = list(csv.reader(dbRead, delimiter=\",\"))\ncolumn = [row[-1] for row in db]\nfor row in tqdm(db):\n row[-2]=Counter(column)[row[-1]]\ndf=pd.DataFrame(data=db)\ndf.to_csv('db.csv', sep=\",\", encoding='utf8')"
] | [
[
"pandas.DataFrame"
]
] |
xumm94/2018_data_science_bowl | [
"9f7a6b60b7c1e933c30acd8abbdeeb7bd869a3f6"
] | [
"train_nuclei.py"
] | [
"# coding: utf-8\n\n\"\"\"\nMask R-CNN - Train on Nuclei Dataset (Updated from train_shape.ipynb)\n\nThis notebook shows how to train Mask R-CNN on your own dataset. \nTo keep things simple we use a synthetic dataset of shapes (squares, \ntriangles, and circles) which enables fast training. You'd still \nneed a GPU, though, because the network backbone is a Resnet101, \nwhich would be too slow to train on a CPU. On a GPU, you can start \nto get okay-ish results in a few minutes, and good results in less than an hour.\n\"\"\"\n\n\n\nimport os\nimport sys\nimport random\nimport math\nimport re\nimport time\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom config import Config\nimport utils\nimport model as modellib\nimport visualize\nfrom model import log\nimport logging\nimport argparse\n\n\"\"\"\nConfigurations\n\nOverride form Config\n\"\"\"\n\nclass NucleiConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"nuclei\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 2\n IMAGES_PER_GPU = 8\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + 3 shapes\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 512\n IMAGE_MAX_DIM = 512\n\n # Number of ROIs per image to feed to classifier/mask heads\n # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n # enough positive proposals to fill this and keep a positive:negative\n # ratio of 1:3. You can increase the number of proposals by adjusting\n # the RPN NMS threshold.\n TRAIN_ROIS_PER_IMAGE = 300\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. 
Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 32\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 50\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\n LEARNING_RATE = 0.001\n\n # Maximum number of ground truth instances to use in one image\n MAX_GT_INSTANCES = 200\n\n def display(self, logger):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n logger.info('\\nConfigurations:')\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n logger.info(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n\nclass NucleiDataset(utils.Dataset):\n\n \"\"\"Load the images and masks from dataset.\"\"\"\n\n def load_image_info(self, data_path, img_set = None):\n \"\"\"Get the picture names(ids) of the dataset.\"\"\"\n \n # Add classes\n self.add_class(\"nucleis\", 1, \"regular\")\n # TO DO : Three different image types into three classes\n \n # Add images\n # Get the images ids of training/testing set\n if img_set is None:\n train_ids = next(os.walk(data_path))[1]\n else:\n with open(img_set) as f:\n read_data = f.readlines()\n train_ids = [read_data[i][:-1] for i in range(0,len(read_data))] # Delete New line '\\n'\n # Get the info of the images\n for i, id_ in enumerate(train_ids):\n file_path = os.path.join(data_path, id_)\n img_path = os.path.join(file_path, \"images\")\n masks_path = os.path.join(file_path, \"masks\")\n img_name = id_ + \".png\"\n img = cv2.imread(os.path.join(img_path, img_name))\n width, height, _ = img.shape\n self.add_image(\"nucleis\", image_id=id_, path=file_path,\n img_path=img_path, masks_path=masks_path,\n width=width, height=height,\n nucleis=\"nucleis\") \n\n def load_image(self, image_id):\n \"\"\"Load image from file of the given image ID.\"\"\"\n info = self.image_info[image_id]\n img_path = info[\"img_path\"]\n img_name = info[\"id\"] + \".png\"\n image = cv2.imread(os.path.join(img_path, img_name))\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the given image ID.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"nucleis\":\n return info[\"path\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Load the instance masks of the given image ID.\"\"\"\n info = self.image_info[image_id]\n mask_files = next(os.walk(info[\"masks_path\"]))[2]\n masks = np. 
zeros([info['height'], info['width'], len(mask_files)], dtype=np.uint8)\n        for i, id_ in enumerate(mask_files):\n            single_mask = cv2.imread(os.path.join(info[\"masks_path\"], id_), 0)  # grayscale, pixel values 0/255\n            masks[:, :, i] = single_mask\n        class_ids = np.ones(len(mask_files))\n        return masks, class_ids.astype(np.int32)\n\n    # def test(self):\n    #     return \"1\"\n\ndef rle_encoding(x):\n    \"\"\"Run-length encode a binary mask: column-major traversal, 1-indexed starts,\n    as required by the Kaggle submission format.\"\"\"\n    dots = np.where(x.T.flatten() == 1)[0]\n    run_lengths = []\n    prev = -2\n    for b in dots:\n        if (b > prev + 1): run_lengths.extend((b + 1, 0))\n        run_lengths[-1] += 1\n        prev = b\n    return run_lengths\n\ndef parser_argument():\n    # Parse command line arguments\n    parser = argparse.ArgumentParser(\n        description='Train Mask R-CNN on Nuclei Dataset.')\n    parser.add_argument(\"command\",\n                        metavar=\"<command>\",\n                        help=\"'train' or 'evaluate' or 'predict'\")\n    parser.add_argument('--datapath',\n                        metavar=\"/path/to/data/\",\n                        default=\"./data\",\n                        help='Directory of the Nuclei dataset')\n    parser.add_argument('--init_with',\n                        metavar=\"/init/type\",\n                        default=\"coco\",\n                        help=\"Initialize with the (\\\"coco\\\"/\\\"imagenet\\\"/\\\"last\\\") net\")\n    parser.add_argument('--model',\n                        metavar=\"/path/to/weights.h5\",\n                        default=\"./models/mask_rcnn_coco.h5\",\n                        help=\"Path to weights .h5 file\")\n    parser.add_argument('--ckpt',\n                        metavar=\"/path/to/save/checkpoint\",\n                        default=\"/data/lf/Nuclei/logs\",\n                        help=\"Directory of the checkpoint\")\n    parser.add_argument('--epochs',\n                        metavar=\"/num/of/epochs\",\n                        default=50,\n                        type=int,\n                        help=\"The number of the training epochs\")\n    parser.add_argument('--finetune',\n                        metavar=\"/finetune/type\",\n                        default=\"heads\",\n                        help=\"The type of the finetune method (\\\"heads\\\" or \\\"all\\\")\")\n    parser.add_argument('--lr_start',\n                        metavar=\"/value/of/start/lr\",\n                        default=\"0.001\",\n                        type=float,\n                        help=\"The value of learning rate to start\")\n    parser.add_argument('--train_dataset',\n                        metavar=\"train/imgs/names\",\n                        default=\"10-fold-train-1.txt\",\n                        help=\"The training set split of the data\")\n    parser.add_argument('--val_dataset',\n                        metavar=\"val/imgs/names\",\n                        default=\"10-fold-val-1.txt\",\n                        help=\"The validation set split of the data\")\n\n    return parser.parse_args()\n\nif __name__ == '__main__':\n    \n    args = parser_argument()\n    logname = \"config-\" + time.strftime('%Y%m%d%H%M', time.localtime(time.time())) + \".log\"\n    logging.basicConfig(filename=os.path.join(args.ckpt, logname), level=logging.INFO)\n    logger = logging.getLogger('root')\n    logger.info('\\nBasic Setting:')\n    logger.info('\\nCommand: {} \\n Initialize: {} \\n Model: {} \\n Datapath: {} \\n Ckpt: {} \\n Epochs: {} \\n'\n                ' Finetune: {} \\n Train_dataset: {} \\n Val_dataset: {} \\n'\n                .format(args.command, args.init_with, args.model, args.datapath, args.ckpt,\n                        args.epochs, args.finetune, args.train_dataset, args.val_dataset))\n\n    # Train or evaluate or predict\n    if args.command == \"train\":\n\n        config = NucleiConfig()\n        config.LEARNING_RATE = args.lr_start\n        config.display(logger)\n        model = modellib.MaskRCNN(mode=\"training\", config=config,\n                                  model_dir=args.ckpt)\n        \n        # Select weights file to load\n        print(\"Loading weights from \", args.model)\n\n        if args.init_with == \"imagenet\":\n            model.load_weights(model.get_imagenet_weights(), by_name=True)\n        elif args.init_with == \"coco\":\n            # Load weights trained on MS COCO, but skip layers that\n            # are different due to the different number of classes\n            # See README for instructions to download the COCO weights\n            model.load_weights(args.model, by_name=True,\n                               
exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \n                                        \"mrcnn_bbox\", \"mrcnn_mask\"])\n        elif args.init_with == \"last\":\n            # Load the last model you trained and continue training\n            model.load_weights(args.model, by_name=True)\n\n        # Load the training and validation splits of the nuclei data.\n        DATASET_DIR = os.path.join(args.datapath, \"stage1_train_fixed\")\n        TRAIN_IMG_SET = os.path.join(args.datapath, \"stage1_train_fixed_10fold\", args.train_dataset)\n        VAL_IMG_SET = os.path.join(args.datapath, \"stage1_train_fixed_10fold\", args.val_dataset)\n\n        dataset_train = NucleiDataset()\n        dataset_train.load_image_info(DATASET_DIR, TRAIN_IMG_SET)\n        dataset_train.prepare()\n\n        dataset_val = NucleiDataset()\n        dataset_val.load_image_info(DATASET_DIR, VAL_IMG_SET)\n        dataset_val.prepare()\n\n        print(\"Loading {} training images, {} validation images\"\n              .format(len(dataset_train.image_ids), len(dataset_val.image_ids)))\n\n\n        if args.finetune == \"heads\":\n            model.train(dataset_train, dataset_val, \n                        learning_rate=config.LEARNING_RATE, \n                        epochs=int(args.epochs), \n                        layers='heads',\n                        logger=logger)\n        elif args.finetune == \"all\":\n            model.train(dataset_train, dataset_val,\n                        learning_rate=config.LEARNING_RATE,\n                        epochs=int(args.epochs),\n                        layers='all',\n                        logger=logger)\n        else:\n            raise NameError(\"Only two finetune types are valid (\\\"heads\\\" or \\\"all\\\")\")\n\n\n    elif args.command == \"evaluate\": \n        # TODO AP in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]\n        class InferenceConfig(NucleiConfig):\n            # Set batch size to 1 since we'll be running inference on\n            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n            GPU_COUNT = 1\n            IMAGES_PER_GPU = 1\n            DETECTION_MAX_INSTANCES = 300\n        config = InferenceConfig()\n        config.display(logger)\n\n        model = modellib.MaskRCNN(mode=\"inference\", config=config,\n                                  model_dir=args.ckpt)\n\n        print(\"Loading weights from \", args.model)\n        model.load_weights(args.model, by_name=True)\n\n        VALSET_DIR = os.path.join(args.datapath, \"stage1_val\")\n        dataset_val = NucleiDataset()\n        dataset_val.load_image_info(VALSET_DIR)\n        dataset_val.prepare()\n        print(\"Evaluate {} images\".format(len(dataset_val.image_ids)))\n\n        APs = []\n\n        for image_id in tqdm(dataset_val.image_ids):\n            # Load image and ground truth data\n            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(\n                dataset_val, config, image_id, use_mini_mask=False)\n            molded_images = np.expand_dims(modellib.mold_image(image, config), 0)\n\n            # Run object detection\n            results = model.detect([image], verbose=0)\n            r = results[0]\n\n            # Compute AP\n            AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, \n                gt_class_id, r[\"rois\"], r[\"class_ids\"], r[\"scores\"], iou_threshold=0.5)\n            APs.append(AP)\n\n        print(\"mAP: \", np.mean(APs))\n\n    elif args.command == \"predict\":\n\n        class InferenceConfig(NucleiConfig):\n            # Set batch size to 1 since we'll be running inference on\n            # one image at a time. 
Batch size = GPU_COUNT * IMAGES_PER_GPU\n            GPU_COUNT = 1\n            IMAGES_PER_GPU = 1\n            DETECTION_NMS_THRESHOLD = 0.3\n            DETECTION_MAX_INSTANCES = 300\n\n        config = InferenceConfig()\n        config.display(logger)\n\n        model = modellib.MaskRCNN(mode=\"inference\", config=config,\n                                  model_dir=args.ckpt)\n\n        print(\"Loading weights from \", args.model)\n        model.load_weights(args.model, by_name=True)\n\n        TESTSET_DIR = os.path.join(args.datapath, \"stage1_test\")\n        dataset_test = NucleiDataset()\n        dataset_test.load_image_info(TESTSET_DIR)\n        dataset_test.prepare()\n\n        print(\"Predict {} images\".format(dataset_test.num_images))\n\n        test_ids = []\n        test_rles = []\n\n        for image_id in tqdm(dataset_test.image_ids):\n            image = dataset_test.load_image(image_id)\n            id_ = dataset_test.image_info[image_id][\"id\"]\n            results = model.detect([image], verbose=0)\n            r = results[0]\n            # Remove overlaps between predicted instances: pixels already claimed\n            # by an earlier mask are cleared from later ones, since the RLE\n            # submission format requires non-overlapping masks.\n            mask_exist = np.zeros(r['masks'].shape[:-1], dtype=np.uint8)\n            for i in range(r['masks'].shape[-1]):\n                _mask = r['masks'][:,:,i]\n                overlap_index = np.where(np.multiply(mask_exist, _mask) == 1)\n                _mask[overlap_index] = 0\n                mask_exist += _mask\n                if np.any(_mask):\n                    test_ids.append(id_)\n                    test_rles.append(rle_encoding(_mask))\n                else:\n                    continue\n                # if np.count_nonzero(_mask) > 40 :\n                #     test_ids.append(id_)\n                #     test_rles.append(rle_encoding(_mask))\n                # else :\n                #     continue\n\n        sub = pd.DataFrame()\n        sub['ImageId'] = test_ids\n        sub['EncodedPixels'] = pd.Series(test_rles).apply(lambda x: ' '.join(str(y) for y in x))\n        csvpath = \"{}.csv\".format(args.model)\n        print(\"Writing the Result in {}\".format(csvpath))\n        sub.to_csv(csvpath, index=False)\n\n    else:\n        print(\"'{}' is not recognized. Use 'train', 'evaluate' or 'predict'\".format(args.command))\n\n\n"
] | [
[
"numpy.multiply",
"pandas.Series",
"numpy.zeros",
"numpy.any",
"pandas.DataFrame",
"numpy.mean"
]
] |
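The `rle_encoding` helper in the row above produces the Kaggle Data Science Bowl 2018 submission format: foreground pixels are traversed in column-major order, positions are 1-indexed, and runs are emitted as alternating (start, length) values. Below is a minimal self-contained sketch of that encoding; the `rle_decoding` inverse is *not* in the original file and is added here only to sanity-check round-trips.

```python
import numpy as np

def rle_encoding(x):
    """Run-length encode a binary mask: column-major traversal, 1-indexed
    starts, alternating (start, length) values -- the DSB 2018 format."""
    dots = np.where(x.T.flatten() == 1)[0]   # foreground pixel positions
    run_lengths = []
    prev = -2
    for b in dots:
        if b > prev + 1:                     # a new run starts here
            run_lengths.extend((b + 1, 0))   # 1-indexed start, length 0
        run_lengths[-1] += 1                 # grow the current run
        prev = b
    return run_lengths

def rle_decoding(rle, shape):
    """Hypothetical inverse of rle_encoding (not in the original file),
    used here only to verify the round-trip."""
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, length in zip(rle[0::2], rle[1::2]):
        flat[start - 1:start - 1 + length] = 1   # undo the 1-indexing
    return flat.reshape((shape[1], shape[0])).T  # undo the column-major flatten

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1
rle = rle_encoding(mask)
print(rle)                                        # [6, 2, 10, 2]
assert np.array_equal(rle_decoding(rle, mask.shape), mask)
```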
mitenjain/protpore | [
"06b779473c4bf9f9c8c4305aa08873ae75386886"
] | [
"proteinhmmvisualize.py"
] | [
"'''\r\nAuthor: Hannah Meyers\r\n\r\nThis file contains the experiment code for attempting to model\r\nprotein nanopore traces via HMMs. Please see inline comments\r\nfor an explanation of what each piece of the code is doing.\r\n'''\r\nfrom __future__ import print_function\r\n\r\nfrom PyPore.parsers import *\r\nfrom PyPore.DataTypes import *\r\nfrom hmm import *\r\nfrom yahmm import *\r\n\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport itertools as it\r\nimport glob\r\nimport seaborn as sns\r\nimport sys\r\nimport pandas as pd\r\nfrom proteinDists import *\r\nfrom scipy.stats import kde\r\n\r\n\r\n\r\n#Experiment data files. The first set before the break are all experiment files from\r\n#the same day of data collection. Files after the break are each from different days.\r\nfilenames = [\r\n\r\n#\"ProteinFiles/12907001-s05.abf\"\r\n#\"ProteinFiles/13311001-s05.abf\"\r\n\"experiment_data/13n25010-s05.abf\",\r\n#\"experiment_data/13n25001-s05.abf\",\r\n#\"experiment_data/13n25005-s05.abf\",\r\n#\"experiment_data/13n25007-s05.abf\",\r\n#\"experiment_data/13n25012-s05.abf\",#bad\r\n#----#\r\n#\"experiment_data/13n12001-s05.abf\",\r\n#\"experiment_data/13n13006-s05.abf\",\r\n#\"experiment_data/14131001-s05.abf\",\r\n#---#\r\n#\"experiment_data/14410016-s05.abf\"\r\n] \r\n\r\n#Inserts are uniform across the range of current we expect to see in an event\r\ninsert1 = MultivariateDistribution( [ UniformDistribution( 0, 40 ), UniformDistribution( 0, 10 ) ] )\r\n\r\n#Create first local model\r\nprofile_means = pd.read_csv( 'profile_data/profilemeans.csv' )\r\nprofile_stds = pd.read_csv( 'profile_data/profilestds.csv' )\r\n\r\n#Convert CSV data to distribution objects\r\ndists_means = [ NormalDistribution( profile_means[col].mean(), profile_means[col].std() ) for col in profile_means ] \r\ndists_stds = [ LogNormalDistribution( np.log( profile_stds[col] ).mean(), np.log( profile_stds[col] ).std() ) for col in profile_stds ]\r\n\r\n\r\n#build multivariate profile with distributions of means/std deviations\r\nprofile = [ MultivariateDistribution([ mean, std ]) for mean, std in it.izip( dists_means, dists_stds ) ]\r\n#profile[5] = MultivariateDistribution([ ExtremeValueDistribution( 20, 10 ), LogNormalDistribution( np.log(4.5), np.log(3.5) ) ])\r\n\r\n#print(profile[5])\r\n\r\n#list of board functions corresponds to the 11 profile positions\r\nboardlist = [ProteinDomainBoard2]*2 +[ProteinDomainBoard]*9\r\n\r\n#build model\r\nmodel = ModularDomainProfileModel2( boardlist, profile, \"ClpXProfile-{}\".format( len(profile) ), insert1)\r\n\r\n\r\n\r\n#iteration for applying model to events in filenames list and plotting\r\nfor file in it.imap( File, filenames ):\r\n x = 1\r\n \r\n print(file.filename)\r\n #Events must drop below this threshold\r\n threshold = 38\r\n rules = [lambda event: event.duration > 1000000,\r\n lambda event: event.min > -5,\r\n lambda event: event.max < threshold]\r\n \r\n file.parse( lambda_event_parser( threshold=threshold, rules = rules ) )\r\n \r\n for event in file.events:\r\n event.filter()\r\n \r\n print(event)\r\n #false_positive_rate controls the number of segments that will be created by the segmenter\r\n event.parse( SpeedyStatSplit( min_width=5, false_positive_rate=1e-65, cutoff_freq = 2000) )\r\n \r\n #print(event.segments)\r\n \r\n \r\n #Apply HMM to event\r\n _, hidden_states = model.viterbi( np.array( [[ seg.mean, seg.std] for seg in event.segments ] ) )\r\n if hidden_states != None:\r\n \r\n #First subplot is event + segmentation\r\n plt.figure( 
figsize=(20, 8))\r\n            plt.subplot( 311 )\r\n            event.plot( color='cycle' )\r\n\r\n            #Second subplot is event + HMM\r\n            plt.subplot( 312 )\r\n            event.plot( color='hmm', hmm=model, hidden_states=hidden_states, cmap='Set1' )\r\n\r\n            #Final subplot is color cycle with profile means\r\n            #this subplot is currently inaccurate as it only plots the first profile\r\n            #furthermore, there was a bug in PyPore when I started on this that makes the color cycle\r\n            #not match up to the HMM colors. I am unsure if the bug has been fixed since then.\r\n            ax = plt.subplot( 313 )\r\n            plt.imshow( [ np.arange( 0., len(profile) ) / len(profile) ], interpolation='nearest', cmap=\"Set1\" )\r\n            plt.grid( False )\r\n            means = [ d.parameters[0][0].parameters[0] for d in profile ]\r\n            for i, mean in enumerate( means ):\r\n                plt.text( i-0.2, 0.1, str( round(mean, 1) ), fontsize=12 )\r\n            \r\n            #Output HMM state path to an output.txt file\r\n            outputtext = 'output' + str(x) + '.txt'\r\n            with open(outputtext, 'w') as f:\r\n                for i, state in enumerate( hidden_states ):\r\n                    f.write(state[1].name+\"\\n\")\r\n            \r\n            #s = file.filename[16:] +'fp55s' + str(x)\r\n            s = 'backslip' + str(x)\r\n            #save figure with name s + counter to prevent name duplications\r\n            plt.savefig(s)\r\n            plt.close()  #close the figure so memory is freed before the next event\r\n            x += 1\r\n            \r\n            #show figure\r\n            #plt.show()\r\n    file.close()\r\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplot"
]
] |
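The `proteinhmmvisualize.py` row above builds its emission profile by fitting a Normal distribution to per-position segment means and a LogNormal distribution to per-position segment standard deviations, both read from CSV. PyPore/yahmm are hard to install today, so the sketch below reproduces only that parameter-extraction step with plain pandas/numpy; the toy DataFrames are stand-ins for `profile_data/profilemeans.csv` and `profile_data/profilestds.csv`, and the printed tuples correspond to what the script wraps in `NormalDistribution`/`LogNormalDistribution`.

```python
import numpy as np
import pandas as pd

# Toy stand-ins for the CSVs: one column per profile position,
# one row per training event.
rng = np.random.default_rng(0)
profile_means = pd.DataFrame({f"pos{i}": rng.normal(20 + i, 2.0, size=50) for i in range(11)})
profile_stds = pd.DataFrame({f"pos{i}": rng.lognormal(np.log(3.0), 0.3, size=50) for i in range(11)})

# Per-position parameters, mirroring the script's dists_means / dists_stds.
dists_means = [(profile_means[col].mean(), profile_means[col].std()) for col in profile_means]
dists_stds = [(np.log(profile_stds[col]).mean(), np.log(profile_stds[col]).std()) for col in profile_stds]

for i, ((mu, sigma), (log_mu, log_sigma)) in enumerate(zip(dists_means, dists_stds)):
    print(f"position {i:2d}: mean ~ Normal({mu:.2f}, {sigma:.2f}), "
          f"std ~ LogNormal({log_mu:.2f}, {log_sigma:.2f})")
```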
cloudscenes/geopandas | [
"409d8f0a1562df088ce28c39a48fe4df669660fe"
] | [
"geopandas/tools/tests/test_clip.py"
] | [
"\"\"\"Tests for the clip module.\"\"\"\n\nimport warnings\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\n\nimport shapely\nfrom shapely.geometry import (\n Polygon,\n Point,\n LineString,\n LinearRing,\n GeometryCollection,\n MultiPoint,\n)\n\nimport geopandas\nfrom geopandas import GeoDataFrame, GeoSeries, clip\n\nfrom geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\nimport pytest\n\n\npytestmark = pytest.mark.skip_no_sindex\npandas_133 = pd.__version__ == LooseVersion(\"1.3.3\")\n\n\[email protected]\ndef point_gdf():\n \"\"\"Create a point GeoDataFrame.\"\"\"\n pts = np.array([[2, 2], [3, 4], [9, 8], [-12, -15]])\n gdf = GeoDataFrame([Point(xy) for xy in pts], columns=[\"geometry\"], crs=\"EPSG:3857\")\n return gdf\n\n\[email protected]\ndef pointsoutside_nooverlap_gdf():\n \"\"\"Create a point GeoDataFrame. Its points are all outside the single\n rectangle, and its bounds are outside the single rectangle's.\"\"\"\n pts = np.array([[5, 15], [15, 15], [15, 20]])\n gdf = GeoDataFrame([Point(xy) for xy in pts], columns=[\"geometry\"], crs=\"EPSG:3857\")\n return gdf\n\n\[email protected]\ndef pointsoutside_overlap_gdf():\n \"\"\"Create a point GeoDataFrame. Its points are all outside the single\n rectangle, and its bounds are overlapping the single rectangle's.\"\"\"\n pts = np.array([[5, 15], [15, 15], [15, 5]])\n gdf = GeoDataFrame([Point(xy) for xy in pts], columns=[\"geometry\"], crs=\"EPSG:3857\")\n return gdf\n\n\[email protected]\ndef single_rectangle_gdf():\n \"\"\"Create a single rectangle for clipping.\"\"\"\n poly_inters = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])\n gdf = GeoDataFrame([1], geometry=[poly_inters], crs=\"EPSG:3857\")\n gdf[\"attr2\"] = \"site-boundary\"\n return gdf\n\n\[email protected]\ndef larger_single_rectangle_gdf():\n \"\"\"Create a slightly larger rectangle for clipping.\n The smaller single rectangle is used to test the edge case where slivers\n are returned when you clip polygons. 
This fixture is larger which\n eliminates the slivers in the clip return.\n \"\"\"\n poly_inters = Polygon([(-5, -5), (-5, 15), (15, 15), (15, -5), (-5, -5)])\n gdf = GeoDataFrame([1], geometry=[poly_inters], crs=\"EPSG:3857\")\n gdf[\"attr2\"] = [\"study area\"]\n return gdf\n\n\[email protected]\ndef buffered_locations(point_gdf):\n \"\"\"Buffer points to create a multi-polygon.\"\"\"\n buffered_locs = point_gdf\n buffered_locs[\"geometry\"] = buffered_locs.buffer(4)\n buffered_locs[\"type\"] = \"plot\"\n return buffered_locs\n\n\[email protected]\ndef donut_geometry(buffered_locations, single_rectangle_gdf):\n \"\"\"Make a geometry with a hole in the middle (a donut).\"\"\"\n donut = geopandas.overlay(\n buffered_locations, single_rectangle_gdf, how=\"symmetric_difference\"\n )\n return donut\n\n\[email protected]\ndef two_line_gdf():\n \"\"\"Create Line Objects For Testing\"\"\"\n linea = LineString([(1, 1), (2, 2), (3, 2), (5, 3)])\n lineb = LineString([(3, 4), (5, 7), (12, 2), (10, 5), (9, 7.5)])\n gdf = GeoDataFrame([1, 2], geometry=[linea, lineb], crs=\"EPSG:3857\")\n return gdf\n\n\[email protected]\ndef multi_poly_gdf(donut_geometry):\n \"\"\"Create a multi-polygon GeoDataFrame.\"\"\"\n multi_poly = donut_geometry.unary_union\n out_df = GeoDataFrame(geometry=GeoSeries(multi_poly), crs=\"EPSG:3857\")\n out_df[\"attr\"] = [\"pool\"]\n return out_df\n\n\[email protected]\ndef multi_line(two_line_gdf):\n \"\"\"Create a multi-line GeoDataFrame.\n This GDF has one multiline and one regular line.\"\"\"\n # Create a single and multi line object\n multiline_feat = two_line_gdf.unary_union\n linec = LineString([(2, 1), (3, 1), (4, 1), (5, 2)])\n out_df = GeoDataFrame(geometry=GeoSeries([multiline_feat, linec]), crs=\"EPSG:3857\")\n out_df[\"attr\"] = [\"road\", \"stream\"]\n return out_df\n\n\[email protected]\ndef multi_point(point_gdf):\n \"\"\"Create a multi-point GeoDataFrame.\"\"\"\n multi_point = point_gdf.unary_union\n out_df = GeoDataFrame(\n geometry=GeoSeries(\n [multi_point, Point(2, 5), Point(-11, -14), Point(-10, -12)]\n ),\n crs=\"EPSG:3857\",\n )\n out_df[\"attr\"] = [\"tree\", \"another tree\", \"shrub\", \"berries\"]\n return out_df\n\n\[email protected]\ndef mixed_gdf():\n \"\"\"Create a Mixed Polygon and LineString For Testing\"\"\"\n point = Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)])\n line = LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])\n poly = Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)])\n ring = LinearRing([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])\n gdf = GeoDataFrame(\n [1, 2, 3, 4], geometry=[point, poly, line, ring], crs=\"EPSG:3857\"\n )\n return gdf\n\n\[email protected]\ndef geomcol_gdf():\n \"\"\"Create a Mixed Polygon and LineString For Testing\"\"\"\n point = Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)])\n poly = Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)])\n coll = GeometryCollection([point, poly])\n gdf = GeoDataFrame([1], geometry=[coll], crs=\"EPSG:3857\")\n return gdf\n\n\[email protected]\ndef sliver_line():\n \"\"\"Create a line that will create a point when clipped.\"\"\"\n linea = LineString([(10, 5), (13, 5), (15, 5)])\n lineb = LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])\n gdf = GeoDataFrame([1, 2], geometry=[linea, lineb], crs=\"EPSG:3857\")\n return gdf\n\n\ndef test_not_gdf(single_rectangle_gdf):\n \"\"\"Non-GeoDataFrame inputs raise attribute errors.\"\"\"\n with pytest.raises(TypeError):\n clip((2, 3), single_rectangle_gdf)\n with pytest.raises(TypeError):\n clip(single_rectangle_gdf, (2, 
3))\n\n\ndef test_returns_gdf(point_gdf, single_rectangle_gdf):\n \"\"\"Test that function returns a GeoDataFrame (or GDF-like) object.\"\"\"\n out = clip(point_gdf, single_rectangle_gdf)\n assert isinstance(out, GeoDataFrame)\n\n\ndef test_returns_series(point_gdf, single_rectangle_gdf):\n \"\"\"Test that function returns a GeoSeries if GeoSeries is passed.\"\"\"\n out = clip(point_gdf.geometry, single_rectangle_gdf)\n assert isinstance(out, GeoSeries)\n\n\ndef test_non_overlapping_geoms():\n \"\"\"Test that a bounding box returns empty if the extents don't overlap\"\"\"\n unit_box = Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])\n unit_gdf = GeoDataFrame([1], geometry=[unit_box], crs=\"EPSG:3857\")\n non_overlapping_gdf = unit_gdf.copy()\n non_overlapping_gdf = non_overlapping_gdf.geometry.apply(\n lambda x: shapely.affinity.translate(x, xoff=20)\n )\n out = clip(unit_gdf, non_overlapping_gdf)\n assert_geodataframe_equal(out, unit_gdf.iloc[:0])\n out2 = clip(unit_gdf.geometry, non_overlapping_gdf)\n assert_geoseries_equal(out2, GeoSeries(crs=unit_gdf.crs))\n\n\ndef test_clip_points(point_gdf, single_rectangle_gdf):\n \"\"\"Test clipping a points GDF with a generic polygon geometry.\"\"\"\n clip_pts = clip(point_gdf, single_rectangle_gdf)\n pts = np.array([[2, 2], [3, 4], [9, 8]])\n exp = GeoDataFrame([Point(xy) for xy in pts], columns=[\"geometry\"], crs=\"EPSG:3857\")\n assert_geodataframe_equal(clip_pts, exp)\n\n\ndef test_clip_points_geom_col_rename(point_gdf, single_rectangle_gdf):\n \"\"\"Test clipping a points GDF with a generic polygon geometry.\"\"\"\n point_gdf_geom_col_rename = point_gdf.rename_geometry(\"geometry2\")\n clip_pts = clip(point_gdf_geom_col_rename, single_rectangle_gdf)\n pts = np.array([[2, 2], [3, 4], [9, 8]])\n exp = GeoDataFrame(\n [Point(xy) for xy in pts],\n columns=[\"geometry2\"],\n crs=\"EPSG:3857\",\n geometry=\"geometry2\",\n )\n assert_geodataframe_equal(clip_pts, exp)\n\n\ndef test_clip_poly(buffered_locations, single_rectangle_gdf):\n \"\"\"Test clipping a polygon GDF with a generic polygon geometry.\"\"\"\n clipped_poly = clip(buffered_locations, single_rectangle_gdf)\n assert len(clipped_poly.geometry) == 3\n assert all(clipped_poly.geom_type == \"Polygon\")\n\n\ndef test_clip_poly_geom_col_rename(buffered_locations, single_rectangle_gdf):\n \"\"\"Test clipping a polygon GDF with a generic polygon geometry.\"\"\"\n\n poly_gdf_geom_col_rename = buffered_locations.rename_geometry(\"geometry2\")\n clipped_poly = clip(poly_gdf_geom_col_rename, single_rectangle_gdf)\n assert len(clipped_poly.geometry) == 3\n assert \"geometry\" not in clipped_poly.keys()\n assert \"geometry2\" in clipped_poly.keys()\n\n\ndef test_clip_poly_series(buffered_locations, single_rectangle_gdf):\n \"\"\"Test clipping a polygon GDF with a generic polygon geometry.\"\"\"\n clipped_poly = clip(buffered_locations.geometry, single_rectangle_gdf)\n assert len(clipped_poly) == 3\n assert all(clipped_poly.geom_type == \"Polygon\")\n\n\[email protected](pandas_133, reason=\"Regression in pandas 1.3.3 (GH #2101)\")\ndef test_clip_multipoly_keep_slivers(multi_poly_gdf, single_rectangle_gdf):\n \"\"\"Test a multi poly object where the return includes a sliver.\n Also the bounds of the object should == the bounds of the clip object\n if they fully overlap (as they do in these fixtures).\"\"\"\n clipped = clip(multi_poly_gdf, single_rectangle_gdf)\n assert np.array_equal(clipped.total_bounds, single_rectangle_gdf.total_bounds)\n # Assert returned data is a geometry collection given 
sliver geoms\n assert \"GeometryCollection\" in clipped.geom_type[0]\n\n\[email protected](pandas_133, reason=\"Regression in pandas 1.3.3 (GH #2101)\")\ndef test_clip_multipoly_keep_geom_type(multi_poly_gdf, single_rectangle_gdf):\n \"\"\"Test a multi poly object where the return includes a sliver.\n Also the bounds of the object should == the bounds of the clip object\n if they fully overlap (as they do in these fixtures).\"\"\"\n clipped = clip(multi_poly_gdf, single_rectangle_gdf, keep_geom_type=True)\n assert np.array_equal(clipped.total_bounds, single_rectangle_gdf.total_bounds)\n # Assert returned data is a not geometry collection\n assert (clipped.geom_type == \"Polygon\").any()\n\n\ndef test_clip_single_multipoly_no_extra_geoms(\n buffered_locations, larger_single_rectangle_gdf\n):\n \"\"\"When clipping a multi-polygon feature, no additional geom types\n should be returned.\"\"\"\n multi = buffered_locations.dissolve(by=\"type\").reset_index()\n clipped = clip(multi, larger_single_rectangle_gdf)\n assert clipped.geom_type[0] == \"Polygon\"\n\n\ndef test_clip_multiline(multi_line, single_rectangle_gdf):\n \"\"\"Test that clipping a multiline feature with a poly returns expected output.\"\"\"\n clipped = clip(multi_line, single_rectangle_gdf)\n assert clipped.geom_type[0] == \"MultiLineString\"\n\n\ndef test_clip_multipoint(single_rectangle_gdf, multi_point):\n \"\"\"Clipping a multipoint feature with a polygon works as expected.\n should return a geodataframe with a single multi point feature\"\"\"\n clipped = clip(multi_point, single_rectangle_gdf)\n assert clipped.geom_type[0] == \"MultiPoint\"\n assert hasattr(clipped, \"attr\")\n # All points should intersect the clip geom\n assert len(clipped) == 2\n clipped_mutltipoint = MultiPoint(\n [\n Point(2, 2),\n Point(3, 4),\n Point(9, 8),\n ]\n )\n assert clipped.iloc[0].geometry.wkt == clipped_mutltipoint.wkt\n assert all(clipped.intersects(single_rectangle_gdf.unary_union))\n\n\ndef test_clip_lines(two_line_gdf, single_rectangle_gdf):\n \"\"\"Test what happens when you give the clip_extent a line GDF.\"\"\"\n clip_line = clip(two_line_gdf, single_rectangle_gdf)\n assert len(clip_line.geometry) == 2\n\n\ndef test_clip_with_multipolygon(buffered_locations, single_rectangle_gdf):\n \"\"\"Test clipping a polygon with a multipolygon.\"\"\"\n multi = buffered_locations.dissolve(by=\"type\").reset_index()\n clipped = clip(single_rectangle_gdf, multi)\n assert clipped.geom_type[0] == \"Polygon\"\n\n\ndef test_mixed_geom(mixed_gdf, single_rectangle_gdf):\n \"\"\"Test clipping a mixed GeoDataFrame\"\"\"\n clipped = clip(mixed_gdf, single_rectangle_gdf)\n assert (\n clipped.geom_type[0] == \"Point\"\n and clipped.geom_type[1] == \"Polygon\"\n and clipped.geom_type[2] == \"LineString\"\n )\n\n\ndef test_mixed_series(mixed_gdf, single_rectangle_gdf):\n \"\"\"Test clipping a mixed GeoSeries\"\"\"\n clipped = clip(mixed_gdf.geometry, single_rectangle_gdf)\n assert (\n clipped.geom_type[0] == \"Point\"\n and clipped.geom_type[1] == \"Polygon\"\n and clipped.geom_type[2] == \"LineString\"\n )\n\n\ndef test_clip_warning_no_extra_geoms(buffered_locations, single_rectangle_gdf):\n \"\"\"Test a user warning is provided if no new geometry types are found.\"\"\"\n with pytest.warns(UserWarning):\n clip(buffered_locations, single_rectangle_gdf, True)\n warnings.warn(\n \"keep_geom_type was called when no extra geometry types existed.\",\n UserWarning,\n )\n\n\ndef test_clip_with_polygon(single_rectangle_gdf):\n \"\"\"Test clip when using a shapely 
object\"\"\"\n polygon = Polygon([(0, 0), (5, 12), (10, 0), (0, 0)])\n clipped = clip(single_rectangle_gdf, polygon)\n exp_poly = polygon.intersection(\n Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])\n )\n exp = GeoDataFrame([1], geometry=[exp_poly], crs=\"EPSG:3857\")\n exp[\"attr2\"] = \"site-boundary\"\n assert_geodataframe_equal(clipped, exp)\n\n\ndef test_clip_with_line_extra_geom(single_rectangle_gdf, sliver_line):\n \"\"\"When the output of a clipped line returns a geom collection,\n and keep_geom_type is True, no geometry collections should be returned.\"\"\"\n clipped = clip(sliver_line, single_rectangle_gdf, keep_geom_type=True)\n assert len(clipped.geometry) == 1\n # Assert returned data is a not geometry collection\n assert not (clipped.geom_type == \"GeometryCollection\").any()\n\n\ndef test_clip_line_keep_slivers(single_rectangle_gdf, sliver_line):\n \"\"\"Test the correct output if a point is returned\n from a line only geometry type.\"\"\"\n clipped = clip(sliver_line, single_rectangle_gdf)\n # Assert returned data is a geometry collection given sliver geoms\n assert \"Point\" == clipped.geom_type[0]\n assert \"LineString\" == clipped.geom_type[1]\n\n\ndef test_clip_no_box_overlap(pointsoutside_nooverlap_gdf, single_rectangle_gdf):\n \"\"\"Test clip when intersection is empty and boxes do not overlap.\"\"\"\n clipped = clip(pointsoutside_nooverlap_gdf, single_rectangle_gdf)\n assert len(clipped) == 0\n\n\ndef test_clip_box_overlap(pointsoutside_overlap_gdf, single_rectangle_gdf):\n \"\"\"Test clip when intersection is empty and boxes do overlap.\"\"\"\n clipped = clip(pointsoutside_overlap_gdf, single_rectangle_gdf)\n assert len(clipped) == 0\n\n\ndef test_warning_extra_geoms_mixed(single_rectangle_gdf, mixed_gdf):\n \"\"\"Test the correct warnings are raised if keep_geom_type is\n called on a mixed GDF\"\"\"\n with pytest.warns(UserWarning):\n clip(mixed_gdf, single_rectangle_gdf, keep_geom_type=True)\n\n\ndef test_warning_geomcoll(single_rectangle_gdf, geomcol_gdf):\n \"\"\"Test the correct warnings are raised if keep_geom_type is\n called on a GDF with GeometryCollection\"\"\"\n with pytest.warns(UserWarning):\n clip(geomcol_gdf, single_rectangle_gdf, keep_geom_type=True)\n\n\ndef test_warning_crs_mismatch(point_gdf, single_rectangle_gdf):\n with pytest.warns(UserWarning, match=\"CRS mismatch between the CRS\"):\n clip(point_gdf, single_rectangle_gdf.to_crs(4326))\n"
] | [
[
"numpy.array",
"numpy.array_equal"
]
] |
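The fixtures in the `test_clip.py` row above all funnel into `geopandas.clip`, which intersects an input GeoDataFrame/GeoSeries with a mask geometry. A minimal usage sketch mirroring the `point_gdf` and `single_rectangle_gdf` fixtures (coordinates taken from the tests themselves):

```python
import numpy as np
from shapely.geometry import Point, Polygon
from geopandas import GeoDataFrame, clip

# Points: three inside the clip rectangle, one far outside (same as point_gdf).
pts = np.array([[2, 2], [3, 4], [9, 8], [-12, -15]])
points = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")

# Clip mask: the 10x10 rectangle from single_rectangle_gdf.
rect = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
mask = GeoDataFrame([1], geometry=[rect], crs="EPSG:3857")

clipped = clip(points, mask)
print(len(clipped))                 # 3 -- the point at (-12, -15) is dropped
print(clipped.geom_type.unique())   # ['Point']
```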
mendelmaker/yolact | [
"83e7d08f03951c49a9731759e8458c51fe0922d7"
] | [
"eval.py"
] | [
"import json\nimport numpy as np\nimport torch\nimport pycocotools\nimport argparse\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom terminaltables import AsciiTable\nfrom collections import OrderedDict\nimport torch.backends.cudnn as cudnn\n\nfrom data.coco import COCODetection\nfrom modules.build_yolact import Yolact\nfrom utils.augmentations import BaseTransform\nfrom utils.functions import MovingAverage, ProgressBar\nfrom utils.box_utils import bbox_iou, mask_iou\nfrom utils import timer\nfrom utils.output_utils import after_nms, NMS\nfrom data.config import cfg, update_config, COCO_LABEL_MAP\n\nparser = argparse.ArgumentParser(description='YOLACT COCO Evaluation')\nparser.add_argument('--trained_model', default='yolact_base_54_800000.pth', type=str)\nparser.add_argument('--visual_top_k', default=5, type=int, help='Further restrict the number of predictions to parse')\nparser.add_argument('--traditional_nms', default=False, action='store_true', help='Whether to use traditional nms.')\nparser.add_argument('--max_num', default=-1, type=int, help='The maximum number of images for test, set to -1 for all.')\nparser.add_argument('--cocoapi', action='store_true', help='Whether to use cocoapi to evaluate results.')\n\n\nclass Make_json:\n def __init__(self):\n self.bbox_data = []\n self.mask_data = []\n self.coco_cats = {}\n\n for coco_id, real_id in COCO_LABEL_MAP.items():\n class_id = real_id - 1\n self.coco_cats[class_id] = coco_id\n\n def add_bbox(self, image_id: int, category_id: int, bbox: list, score: float):\n \"\"\" Note that bbox should be a list or tuple of (x1, y1, x2, y2) \"\"\"\n bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]\n\n # Round to the nearest 10th to avoid huge file sizes, as COCO suggests\n bbox = [round(float(x) * 10) / 10 for x in bbox]\n\n self.bbox_data.append({'image_id': int(image_id),\n 'category_id': self.coco_cats[int(category_id)],\n 'bbox': bbox,\n 'score': float(score)})\n\n def add_mask(self, image_id: int, category_id: int, segmentation: np.ndarray, score: float):\n \"\"\" The segmentation should be the full mask, the size of the image and with size [h, w]. \"\"\"\n rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))\n rle['counts'] = rle['counts'].decode('ascii') # json.dump doesn't like bytes strings\n\n self.mask_data.append({'image_id': int(image_id),\n 'category_id': self.coco_cats[int(category_id)],\n 'segmentation': rle,\n 'score': float(score)})\n\n def dump(self):\n dump_arguments = [(self.bbox_data, f'results/bbox_detections.json'),\n (self.mask_data, f'results/mask_detections.json')]\n\n for data, path in dump_arguments:\n with open(path, 'w') as f:\n json.dump(data, f)\n\n\nclass APDataObject:\n \"\"\"Stores all the information necessary to calculate the AP for one IoU and one class.\"\"\"\n\n def __init__(self):\n self.data_points = []\n self.num_gt_positives = 0\n\n def push(self, score: float, is_true: bool):\n self.data_points.append((score, is_true))\n\n def add_gt_positives(self, num_positives: int):\n \"\"\" Call this once per image. \"\"\"\n self.num_gt_positives += num_positives\n\n def is_empty(self) -> bool:\n return len(self.data_points) == 0 and self.num_gt_positives == 0\n\n def get_ap(self) -> float:\n \"\"\" Warning: result not cached. 
\"\"\"\n\n if self.num_gt_positives == 0:\n return 0\n\n # Sort descending by score\n self.data_points.sort(key=lambda x: -x[0])\n\n precisions = []\n recalls = []\n num_true = 0\n num_false = 0\n\n # Compute the precision-recall curve. The x axis is recalls and the y axis precisions.\n for datum in self.data_points:\n # datum[1] is whether the detection a true or false positive\n if datum[1]:\n num_true += 1\n else:\n num_false += 1\n\n precision = num_true / (num_true + num_false)\n recall = num_true / self.num_gt_positives\n\n precisions.append(precision)\n recalls.append(recall)\n\n # Smooth the curve by computing [max(precisions[i:]) for i in range(len(precisions))]\n # Basically, remove any temporary dips from the curve.\n # At least that's what I think, idk. COCOEval did it so I do too.\n for i in range(len(precisions) - 1, 0, -1):\n if precisions[i] > precisions[i - 1]:\n precisions[i - 1] = precisions[i]\n\n # Compute the integral of precision(recall) d_recall from recall=0->1 using fixed-length riemann summation with 101 bars.\n y_range = [0] * 101 # idx 0 is recall == 0.0 and idx 100 is recall == 1.00\n x_range = np.array([x / 100 for x in range(101)])\n recalls = np.array(recalls)\n\n # I realize this is weird, but all it does is find the nearest precision(x) for a given x in x_range.\n # Basically, if the closest recall we have to 0.01 is 0.009 this sets precision(0.01) = precision(0.009).\n # I approximate the integral this way, because that's how COCOEval does it.\n indices = np.searchsorted(recalls, x_range, side='left')\n for bar_idx, precision_idx in enumerate(indices):\n if precision_idx < len(precisions):\n y_range[bar_idx] = precisions[precision_idx]\n\n # Finally compute the riemann sum to get our integral.\n # avg([precision(x) for x in 0:0.01:1])\n return sum(y_range) / len(y_range)\n\n\ndef prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, image_id, make_json, cocoapi):\n \"\"\" Returns a list of APs for this image, with each element being for a class \"\"\"\n\n with timer.env('After NMS'):\n pred_classes, pred_confs, pred_boxes, pred_masks = after_nms(nms_outs, h, w)\n if pred_classes.size(0) == 0:\n return\n\n pred_classes = list(pred_classes.cpu().numpy().astype(int))\n pred_confs = list(pred_confs.cpu().numpy().astype(float))\n pred_masks = pred_masks.view(-1, h * w).cuda() if cuda else pred_masks.view(-1, h * w)\n pred_boxes = pred_boxes.cuda() if cuda else pred_boxes\n\n if cocoapi:\n with timer.env('Output json'):\n pred_boxes = pred_boxes.cpu().numpy()\n pred_masks = pred_masks.view(-1, h, w).cpu().numpy()\n\n for i in range(pred_masks.shape[0]):\n # Make sure that the bounding box actually makes sense and a mask was produced\n if (pred_boxes[i, 3] - pred_boxes[i, 1]) * (pred_boxes[i, 2] - pred_boxes[i, 0]) > 0:\n make_json.add_bbox(image_id, pred_classes[i], pred_boxes[i, :], pred_confs[i])\n make_json.add_mask(image_id, pred_classes[i], pred_masks[i, :, :], pred_confs[i])\n return\n\n with timer.env('Prepare gt'):\n gt_boxes = torch.Tensor(gt[:, :4])\n gt_boxes[:, [0, 2]] *= w\n gt_boxes[:, [1, 3]] *= h\n gt_classes = list(gt[:, 4].astype(int))\n gt_masks = torch.Tensor(gt_masks).view(-1, h * w)\n\n if num_crowd > 0:\n split = lambda x: (x[-num_crowd:], x[:-num_crowd])\n crowd_boxes, gt_boxes = split(gt_boxes)\n crowd_masks, gt_masks = split(gt_masks)\n crowd_classes, gt_classes = split(gt_classes)\n\n with timer.env('Eval Setup'):\n mask_iou_cache = mask_iou(pred_masks, gt_masks)\n bbox_iou_cache = bbox_iou(pred_boxes.float(), 
gt_boxes.float())\n\n if num_crowd > 0:\n crowd_mask_iou_cache = mask_iou(pred_masks, crowd_masks, iscrowd=True)\n crowd_bbox_iou_cache = bbox_iou(pred_boxes.float(), crowd_boxes.float(), iscrowd=True)\n else:\n crowd_mask_iou_cache = None\n crowd_bbox_iou_cache = None\n\n iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(), lambda i, j: crowd_bbox_iou_cache[i, j].item()),\n ('mask', lambda i, j: mask_iou_cache[i, j].item(), lambda i, j: crowd_mask_iou_cache[i, j].item())]\n\n timer.start('Main loop')\n for _class in set(pred_classes + gt_classes):\n num_gt_per_class = gt_classes.count(_class)\n\n for iouIdx in range(len(iou_thresholds)):\n iou_threshold = iou_thresholds[iouIdx]\n\n for iou_type, iou_func, crowd_func in iou_types:\n gt_used = [False] * len(gt_classes)\n ap_obj = ap_data[iou_type][iouIdx][_class]\n ap_obj.add_gt_positives(num_gt_per_class)\n\n for i, pred_class in enumerate(pred_classes):\n if pred_class != _class:\n continue\n\n max_iou_found = iou_threshold\n max_match_idx = -1\n for j, gt_class in enumerate(gt_classes):\n if gt_used[j] or gt_class != _class:\n continue\n\n iou = iou_func(i, j)\n\n if iou > max_iou_found:\n max_iou_found = iou\n max_match_idx = j\n\n if max_match_idx >= 0:\n gt_used[max_match_idx] = True\n ap_obj.push(pred_confs[i], True)\n else:\n # If the detection matches a crowd, we can just ignore it\n matched_crowd = False\n\n if num_crowd > 0:\n for j in range(len(crowd_classes)):\n if crowd_classes[j] != _class:\n continue\n\n iou = crowd_func(i, j)\n\n if iou > iou_threshold:\n matched_crowd = True\n break\n\n # All this crowd code so that we can make sure that our eval code gives the\n # same result as COCOEval. There aren't even that many crowd annotations to\n # begin with, but accuracy is of the utmost importance.\n if not matched_crowd:\n ap_obj.push(pred_confs[i], False)\n timer.stop('Main loop')\n\n\ndef calc_map(ap_data):\n print('\\nCalculating mAP...')\n aps = [{'box': [], 'mask': []} for _ in iou_thresholds]\n\n for _class in range(len(cfg.dataset.class_names)):\n for iou_idx in range(len(iou_thresholds)):\n for iou_type in ('box', 'mask'):\n ap_obj = ap_data[iou_type][iou_idx][_class]\n\n if not ap_obj.is_empty():\n aps[iou_idx][iou_type].append(ap_obj.get_ap())\n\n all_maps = {'box': OrderedDict(), 'mask': OrderedDict()}\n\n for iou_type in ('box', 'mask'):\n all_maps[iou_type]['all'] = 0 # Make this first in the ordereddict\n for i, threshold in enumerate(iou_thresholds):\n mAP = sum(aps[i][iou_type]) / len(aps[i][iou_type]) * 100 if len(aps[i][iou_type]) > 0 else 0\n all_maps[iou_type][int(threshold * 100)] = mAP\n all_maps[iou_type]['all'] = (sum(all_maps[iou_type].values()) / (len(all_maps[iou_type].values()) - 1))\n\n row1 = list(all_maps['box'].keys())\n row1.insert(0, ' ')\n\n row2 = list(all_maps['box'].values())\n row2 = [round(aa, 2) for aa in row2]\n row2.insert(0, 'box')\n\n row3 = list(all_maps['mask'].values())\n row3 = [round(aa, 2) for aa in row3]\n row3.insert(0, 'mask')\n\n table = [row1, row2, row3]\n table = AsciiTable(table)\n return table.table, row2, row3\n\n\ndef evaluate(net, dataset, max_num=-1, during_training=False, cocoapi=False, traditional_nms=False):\n frame_times = MovingAverage()\n dataset_size = len(dataset) if max_num < 0 else min(max_num, len(dataset))\n dataset_indices = list(range(len(dataset)))\n dataset_indices = dataset_indices[:dataset_size]\n progress_bar = ProgressBar(40, dataset_size)\n\n # For each class and iou, stores tuples (score, isPositive)\n # Index 
ap_data[type][iouIdx][classIdx]\n    ap_data = {'box': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds],\n               'mask': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds]}\n    make_json = Make_json()\n\n    for i, image_idx in enumerate(dataset_indices):\n        timer.reset()\n\n        with timer.env('Data loading'):\n            img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)\n\n            batch = img.unsqueeze(0)\n            if cuda:\n                batch = batch.cuda()\n\n        with timer.env('Network forward'):\n            net_outs = net(batch)\n            nms_outs = NMS(net_outs, traditional_nms)\n            prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, dataset.ids[image_idx], make_json, cocoapi)\n\n        # First couple of images take longer because we're constructing the graph.\n        # Since that's technically initialization, don't include those in the FPS calculations.\n        fps = 0\n        if i > 1 and not during_training:\n            frame_times.add(timer.total_time())\n            fps = 1 / frame_times.get_avg()\n\n        progress = (i + 1) / dataset_size * 100\n        progress_bar.set_val(i + 1)\n        print('\\rProcessing: %s %d / %d (%.2f%%) %.2f fps        ' % (\n            repr(progress_bar), i + 1, dataset_size, progress, fps), end='')\n\n    else:  # for-else: runs once the loop above completes without a break\n        if cocoapi:\n            make_json.dump()\n            print(f'\\nJson files dumped, saved in: \\'results/\\', start evaluating.')\n\n            gt_annotations = COCO(cfg.dataset.valid_info)\n            bbox_dets = gt_annotations.loadRes(f'results/bbox_detections.json')\n            mask_dets = gt_annotations.loadRes(f'results/mask_detections.json')\n\n            print('\\nEvaluating BBoxes:')\n            bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')\n            bbox_eval.evaluate()\n            bbox_eval.accumulate()\n            bbox_eval.summarize()\n\n            print('\\nEvaluating Masks:')\n            mask_eval = COCOeval(gt_annotations, mask_dets, 'segm')\n            mask_eval.evaluate()\n            mask_eval.accumulate()\n            mask_eval.summarize()\n            return\n\n        table, box_row, mask_row = calc_map(ap_data)\n        print(table)\n        return table, box_row, mask_row\n\n\niou_thresholds = [x / 100 for x in range(50, 100, 5)]\ncuda = torch.cuda.is_available()\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n    strs = args.trained_model.split('_')\n    config = f'{strs[-3]}_{strs[-2]}_config'\n\n    update_config(config)\n    print(f'\\nUsing \\'{config}\\' according to the trained_model.\\n')\n\n    with torch.no_grad():\n        if cuda:\n            cudnn.benchmark = True\n            cudnn.fastest = True\n            torch.set_default_tensor_type('torch.cuda.FloatTensor')\n        else:\n            torch.set_default_tensor_type('torch.FloatTensor')\n\n        dataset = COCODetection(cfg.dataset.valid_images, cfg.dataset.valid_info, augmentation=BaseTransform())\n\n        net = Yolact()\n        net.load_weights('weights/' + args.trained_model, cuda)\n        net.eval()\n        print('\\nModel loaded.\\n')\n\n        if cuda:\n            net = net.cuda()\n\n        evaluate(net, dataset, args.max_num, False, args.cocoapi, args.traditional_nms)\n"
] | [
[
"numpy.searchsorted",
"torch.no_grad",
"torch.set_default_tensor_type",
"torch.cuda.is_available",
"numpy.array",
"torch.Tensor"
]
] |
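`APDataObject.get_ap` in the `eval.py` row above computes a COCO-style 101-point interpolated average precision: detections are sorted by score, a precision-recall curve is accumulated, precision is made monotonically non-increasing from right to left, and precision is then averaged at 101 evenly spaced recall values. A standalone sketch of the same computation; the function name `interpolated_ap` is ours, not the repo's.

```python
import numpy as np

def interpolated_ap(data_points, num_gt_positives):
    """101-point interpolated AP, mirroring APDataObject.get_ap above."""
    if num_gt_positives == 0:
        return 0.0
    data_points = sorted(data_points, key=lambda x: -x[0])  # descending by score
    precisions, recalls = [], []
    num_true, num_false = 0, 0
    for _, is_true in data_points:
        if is_true:
            num_true += 1
        else:
            num_false += 1
        precisions.append(num_true / (num_true + num_false))
        recalls.append(num_true / num_gt_positives)
    # Remove temporary dips: make precision non-increasing from right to left.
    for i in range(len(precisions) - 1, 0, -1):
        precisions[i - 1] = max(precisions[i - 1], precisions[i])
    # Average precision sampled at 101 evenly spaced recall points.
    x_range = np.array([x / 100 for x in range(101)])
    indices = np.searchsorted(np.array(recalls), x_range, side='left')
    y_range = [precisions[idx] if idx < len(precisions) else 0.0 for idx in indices]
    return sum(y_range) / len(y_range)

# Three detections (score, is_true) against two ground-truth objects.
print(interpolated_ap([(0.9, True), (0.8, False), (0.6, True)], num_gt_positives=2))  # ~0.835
```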
nealde/Ampere | [
"75fa9c34940a71ef865eb98b551b4a4a27da96c3"
] | [
"setup.py"
] | [
"import setuptools\nimport pkg_resources\n\nfrom setuptools import setup, Extension\n\n\ndef is_installed(requirement):\n try:\n pkg_resources.require(requirement)\n except pkg_resources.ResolutionError:\n return False\n else:\n return True\n\n\nif not is_installed('numpy>=1.11.0'):\n print(\"\"\"\n Error: numpy needs to be installed first. You can install it via:\n\n $ pip install numpy\n \"\"\")\n exit(1)\n\nif not is_installed('Cython>=0.29'):\n print(\"\"\"\n Error: cython needs to be installed first. You can install it via:\n\n $ pip install cython\n \"\"\")\n exit(1)\n\nimport numpy\nfrom Cython.Distutils import build_ext\nfrom Cython.Build import cythonize\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\nida_dir = \"ampere/models/ida\"\nida_files = ['ida.c', 'ida_band.c', 'ida_dense.c', 'ida_direct.c', 'ida_ic.c', 'ida_io.c', 'nvector_serial.c', 'sundials_band.c', 'sundials_dense.c', 'sundials_direct.c', 'sundials_math.c', 'sundials_nvector.c']\nida_requirements1 = [ida_dir + '/' + ida_file for ida_file in ida_files]\n\n\next_modules = [\n Extension(\"ampere.models.P2D.P2D_fd\", [\"ampere/models/P2D/P2D_fd.pyx\", \"ampere/models/P2D/P2D_fd.c\", *ida_requirements1], include_dirs=[numpy.get_include()]),\n Extension(\"ampere.models.SPM.SPM_fd\", [\"ampere/models/SPM/SPM_fd.pyx\", \"ampere/models/SPM/SPM_fd.c\", *ida_requirements1], include_dirs=[numpy.get_include()]),\n Extension(\"ampere.models.SPM.SPM_fd_sei\", [\"ampere/models/SPM/SPM_fd_sei.pyx\", \"ampere/models/SPM/SPM_fd_sei.c\", *ida_requirements1], include_dirs=[numpy.get_include()]),\n Extension(\"ampere.models.SPM.SPM_par\", [\"ampere/models/SPM/SPM_par.pyx\", \"ampere/models/SPM/SPM_par.c\", *ida_requirements1], include_dirs=[numpy.get_include()]),\n]\ncmdclass = {'build_ext': build_ext}\n\nprint(setuptools.find_packages())\nsetup(\n name=\"ampere\",\n version=\"0.5.4\",\n author=\"Neal Dawson-Elli\",\n author_email=\"[email protected]\",\n description=\"A Python package for working with battery discharge data and physics-based battery models\",\n\n cmdclass=cmdclass,\n ext_modules=cythonize(ext_modules, compiler_directives={'language_level' : \"3\"}),\n\n\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/nealde/Ampere\",\n packages=[*setuptools.find_packages()],\n install_requires=['cython', 'matplotlib < 3.4', 'numpy', 'scipy'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n 'Programming Language :: Cython',\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering :: Mathematics',\n ],\n keywords=\"battery numerical simulation modeling\",\n)\n"
] | [
[
"numpy.get_include"
]
] |
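The Ampere `setup.py` above uses a guard-then-build pattern: it verifies build-time dependencies with `pkg_resources` before importing them, then passes Cython extensions through `cythonize` with numpy's headers on the include path. A minimal sketch of the same pattern under assumed names (`mypkg` and `fast_math.pyx` are hypothetical, not part of the Ampere repo):

```python
# setup.py -- minimal sketch of the guard-then-build pattern.
import pkg_resources
from setuptools import setup, Extension

def is_installed(requirement):
    try:
        pkg_resources.require(requirement)
    except pkg_resources.ResolutionError:
        return False
    return True

# Fail early, before the imports below, if build-time deps are missing.
for req in ("numpy>=1.11.0", "Cython>=0.29"):
    if not is_installed(req):
        raise SystemExit(f"Error: {req} needs to be installed first (pip install it).")

import numpy                          # safe only after the guard above
from Cython.Build import cythonize

ext = Extension("mypkg.fast_math",
                sources=["mypkg/fast_math.pyx"],
                include_dirs=[numpy.get_include()])  # expose numpy headers

setup(name="mypkg",
      version="0.1.0",
      ext_modules=cythonize([ext], compiler_directives={"language_level": "3"}))
```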
hboshnak/mindarmour | [
"0609a4eaea875a84667bed279add9305752880cc",
"0609a4eaea875a84667bed279add9305752880cc"
] | [
"mindarmour/fuzz_testing/model_coverage_metrics.py",
"tests/ut/python/privacy/diff_privacy/test_monitor.py"
] | [
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModel-Test Coverage Metrics.\n\"\"\"\nfrom abc import abstractmethod\nfrom collections import defaultdict\nimport math\nimport numpy as np\n\nfrom mindspore import Tensor\nfrom mindspore import Model\nfrom mindspore.train.summary.summary_record import _get_summary_tensor_data\n\nfrom mindarmour.utils._check_param import check_model, check_numpy_param, check_int_positive, \\\n check_param_type, check_value_positive\nfrom mindarmour.utils.logger import LogUtil\n\nLOGGER = LogUtil.get_instance()\nTAG = 'CoverageMetrics'\n\n\nclass CoverageMetrics:\n \"\"\"\n The abstract base class for Neuron coverage classes calculating coverage metrics.\n\n As we all known, each neuron output of a network will have a output range after training (we call it original\n range), and test dataset is used to estimate the accuracy of the trained network. However, neurons' output\n distribution would be different with different test datasets. Therefore, similar to function fuzz, model fuzz means\n testing those neurons' outputs and estimating the proportion of original range that has emerged with test\n datasets.\n\n Reference: `DeepGauge: Multi-Granularity Testing Criteria for Deep Learning Systems\n <https://arxiv.org/abs/1803.07519>`_\n\n Args:\n model (Model): The pre-trained model which waiting for testing.\n incremental (bool): Metrics will be calculate in incremental way or not. Default: False.\n batch_size (int): The number of samples in a fuzz test batch. 
Default: 32.\n    \"\"\"\n\n    def __init__(self, model, incremental=False, batch_size=32):\n        self._model = check_model('model', model, Model)\n        self.incremental = check_param_type('incremental', incremental, bool)\n        self.batch_size = check_int_positive('batch_size', batch_size)\n        self._activate_table = defaultdict(list)\n\n    @abstractmethod\n    def get_metrics(self, dataset):\n        \"\"\"\n        Calculate coverage metrics of given dataset.\n\n        Args:\n            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n        Raises:\n            NotImplementedError: It is an abstract method.\n        \"\"\"\n        msg = 'The function get_metrics() is an abstract method in class `CoverageMetrics`, and should be' \\\n              ' implemented in child class.'\n        LOGGER.error(TAG, msg)\n        raise NotImplementedError(msg)\n\n    def _init_neuron_activate_table(self, data):\n        \"\"\"\n        Initialise the activate table of each neuron in the model with format:\n        {'layer1': [n1, n2, n3, ..., nn], 'layer2': [n1, n2, n3, ..., nn], ...}\n\n        Args:\n            data (numpy.ndarray): Data used for initialising the activate table.\n\n        Return:\n            dict, the initialised activate table.\n        \"\"\"\n        self._model.predict(Tensor(data))\n        layer_out = _get_summary_tensor_data()\n        if not layer_out:\n            msg = 'User must use TensorSummary() operation to specify the middle layer of the model participating in ' \\\n                  'the coverage calculation.'\n            LOGGER.error(TAG, msg)\n            raise ValueError(msg)\n        activate_table = defaultdict()\n        for layer, value in layer_out.items():\n            activate_table[layer] = np.zeros(value.shape[1], bool)  # np.bool is a deprecated alias of bool\n        return activate_table\n\n    def _get_bounds(self, train_dataset):\n        \"\"\"\n        Update the lower and upper boundaries of neurons' outputs.\n\n        Args:\n            train_dataset (numpy.ndarray): Training dataset used to determine the neurons' output boundaries.\n\n        Return:\n            - numpy.ndarray, upper bounds of neurons' outputs.\n\n            - numpy.ndarray, lower bounds of neurons' outputs.\n        \"\"\"\n        upper_bounds = defaultdict(list)\n        lower_bounds = defaultdict(list)\n        batches = math.ceil(train_dataset.shape[0] / self.batch_size)\n        for i in range(batches):\n            inputs = train_dataset[i * self.batch_size: (i + 1) * self.batch_size]\n            self._model.predict(Tensor(inputs))\n            layer_out = _get_summary_tensor_data()\n            for layer, tensor in layer_out.items():\n                value = tensor.asnumpy()\n                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n                min_value = np.min(value, axis=0)\n                max_value = np.max(value, axis=0)\n                if np.any(upper_bounds[layer]):\n                    max_flag = upper_bounds[layer] > max_value\n                    min_flag = lower_bounds[layer] < min_value\n                    upper_bounds[layer] = upper_bounds[layer] * max_flag + max_value * (1 - max_flag)\n                    lower_bounds[layer] = lower_bounds[layer] * min_flag + min_value * (1 - min_flag)\n                else:\n                    upper_bounds[layer] = max_value\n                    lower_bounds[layer] = min_value\n        return upper_bounds, lower_bounds\n\n    def _activate_rate(self):\n        \"\"\"\n        Calculate the activate rate of neurons.\n        \"\"\"\n        total_neurons = 0\n        activated_neurons = 0\n        for _, value in self._activate_table.items():\n            activated_neurons += np.sum(value)\n            total_neurons += len(value)\n        activate_rate = activated_neurons / total_neurons\n\n        return activate_rate\n\n\nclass NeuronCoverage(CoverageMetrics):\n    \"\"\"\n    Calculate the activated neuron coverage. 
A neuron is activated when its output is greater than the threshold.\n    Neuron coverage equals the proportion of activated neurons to total neurons in the network.\n\n    Args:\n        model (Model): The pre-trained model to be tested.\n        threshold (float): Threshold used to determine whether a neuron is activated or not. Default: 0.1.\n        incremental (bool): Whether metrics will be calculated in an incremental way. Default: False.\n        batch_size (int): The number of samples in a fuzz test batch. Default: 32.\n\n    \"\"\"\n    def __init__(self, model, threshold=0.1, incremental=False, batch_size=32):\n        super(NeuronCoverage, self).__init__(model, incremental, batch_size)\n        threshold = check_param_type('threshold', threshold, float)\n        self.threshold = check_value_positive('threshold', threshold)\n\n\n    def get_metrics(self, dataset):\n        \"\"\"\n        Get the metric of neuron coverage: the proportion of activated neurons to total neurons in the network.\n\n        Args:\n            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n        Returns:\n            float, the metric of 'neuron coverage'.\n\n        Examples:\n            >>> nc = NeuronCoverage(model, threshold=0.1)\n            >>> nc_metrics = nc.get_metrics(test_data)\n        \"\"\"\n        dataset = check_numpy_param('dataset', dataset)\n        batches = math.ceil(dataset.shape[0] / self.batch_size)\n        if not self.incremental or not self._activate_table:\n            self._activate_table = self._init_neuron_activate_table(dataset[0:1])\n        for i in range(batches):\n            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]\n            self._model.predict(Tensor(inputs))\n            layer_out = _get_summary_tensor_data()\n            for layer, tensor in layer_out.items():\n                value = tensor.asnumpy()\n                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n                activate = np.sum(value > self.threshold, axis=0) > 0\n                self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)\n        neuron_coverage = self._activate_rate()\n        return neuron_coverage\n\n\nclass TopKNeuronCoverage(CoverageMetrics):\n    \"\"\"\n    Calculate the top k activated neuron coverage. A neuron is activated when its output is among the top k largest\n    values in its hidden layer. Top k neuron coverage equals the proportion of activated neurons to total neurons in\n    the network.\n\n    Args:\n        model (Model): The pre-trained model to be tested.\n        top_k (int): A neuron is activated when its output is among the top k largest values in its hidden layer.\n            Default: 3.\n        incremental (bool): Whether metrics will be calculated in an incremental way. Default: False.\n        batch_size (int): The number of samples in a fuzz test batch. 
Default: 32.\n    \"\"\"\n    def __init__(self, model, top_k=3, incremental=False, batch_size=32):\n        super(TopKNeuronCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)\n        self.top_k = check_int_positive('top_k', top_k)\n\n    def get_metrics(self, dataset):\n        \"\"\"\n        Get the metric of top k activated neuron coverage.\n\n        Args:\n            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n        Returns:\n            float, the metric of 'top k neuron coverage'.\n\n        Examples:\n            >>> tknc = TopKNeuronCoverage(model, top_k=3)\n            >>> metrics = tknc.get_metrics(test_data)\n        \"\"\"\n        dataset = check_numpy_param('dataset', dataset)\n        batches = math.ceil(dataset.shape[0] / self.batch_size)\n        if not self.incremental or not self._activate_table:\n            self._activate_table = self._init_neuron_activate_table(dataset[0:1])\n        for i in range(batches):\n            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]\n            self._model.predict(Tensor(inputs))\n            layer_out = _get_summary_tensor_data()\n            for layer, tensor in layer_out.items():\n                value = tensor.asnumpy()\n                if len(value.shape) > 2:\n                    value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n                top_k_value = np.sort(value)[:, -self.top_k].reshape(value.shape[0], 1)\n                top_k_value = np.sum((value - top_k_value) >= 0, axis=0) > 0\n                self._activate_table[layer] = np.logical_or(self._activate_table[layer], top_k_value)\n        top_k_neuron_coverage = self._activate_rate()\n        return top_k_neuron_coverage\n\n\nclass SuperNeuronActivateCoverage(CoverageMetrics):\n    \"\"\"\n    Get the metric of 'super neuron activation coverage'. :math:`SNAC = |UpperCornerNeuron|/|N|`. SNAC refers to the\n    proportion of neurons whose output values in the test set exceed the upper bounds of the corresponding neurons'\n    output values in the training set.\n\n    Args:\n        model (Model): The pre-trained model to be tested.\n        train_dataset (numpy.ndarray): Training dataset used to determine the neurons' output boundaries.\n        incremental (bool): Whether metrics will be calculated in an incremental way. Default: False.\n        batch_size (int): The number of samples in a fuzz test batch. 
Default: 32.\n    \"\"\"\n    def __init__(self, model, train_dataset, incremental=False, batch_size=32):\n        super(SuperNeuronActivateCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)\n        train_dataset = check_numpy_param('train_dataset', train_dataset)\n        self.upper_bounds, self.lower_bounds = self._get_bounds(train_dataset=train_dataset)\n\n    def get_metrics(self, dataset):\n        \"\"\"\n        Get the metric of 'strong neuron activation coverage'.\n\n        Args:\n            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n        Returns:\n            float, the metric of 'strong neuron activation coverage'.\n\n        Examples:\n            >>> snac = SuperNeuronActivateCoverage(model, train_dataset)\n            >>> metrics = snac.get_metrics(test_data)\n        \"\"\"\n        dataset = check_numpy_param('dataset', dataset)\n        if not self.incremental or not self._activate_table:\n            self._activate_table = self._init_neuron_activate_table(dataset[0:1])\n        batches = math.ceil(dataset.shape[0] / self.batch_size)\n\n        for i in range(batches):\n            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]\n            self._model.predict(Tensor(inputs))\n            layer_out = _get_summary_tensor_data()\n            for layer, tensor in layer_out.items():\n                value = tensor.asnumpy()\n                if len(value.shape) > 2:\n                    value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n                activate = np.sum(value > self.upper_bounds[layer], axis=0) > 0\n                self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)\n        snac = self._activate_rate()\n        return snac\n\n\nclass NeuronBoundsCoverage(SuperNeuronActivateCoverage):\n    \"\"\"\n    Get the metric of 'neuron boundary coverage' :math:`NBC = (|UpperCornerNeuron| + |LowerCornerNeuron|)/(2*|N|)`,\n    where :math:`|N|` is the number of neurons. NBC refers to the proportion of neurons whose output values in the\n    test dataset exceed the upper and lower bounds of the corresponding neurons' output values in the training\n    dataset.\n\n    Args:\n        model (Model): The pre-trained model to be tested.\n        train_dataset (numpy.ndarray): Training dataset used to determine the neurons' output boundaries.\n        incremental (bool): Whether metrics will be calculated in an incremental way. Default: False.\n        batch_size (int): The number of samples in a fuzz test batch. 
Default: 32.\n \"\"\"\n\n def __init__(self, model, train_dataset, incremental=False, batch_size=32):\n super(NeuronBoundsCoverage, self).__init__(model, train_dataset, incremental=incremental, batch_size=batch_size)\n\n def get_metrics(self, dataset):\n \"\"\"\n Get the metric of 'neuron boundary coverage'.\n\n Args:\n dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n Returns:\n float, the metric of 'neuron boundary coverage'.\n\n Examples:\n >>> nbc = NeuronBoundsCoverage(model, train_dataset)\n >>> metrics = nbc.get_metrics(test_data)\n \"\"\"\n dataset = check_numpy_param('dataset', dataset)\n if not self.incremental or not self._activate_table:\n self._activate_table = self._init_neuron_activate_table(dataset[0:1])\n\n batches = math.ceil(dataset.shape[0] / self.batch_size)\n for i in range(batches):\n inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]\n self._model.predict(Tensor(inputs))\n layer_out = _get_summary_tensor_data()\n for layer, tensor in layer_out.items():\n value = tensor.asnumpy()\n if len(value.shape) > 2:\n value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n outer = np.logical_or(value > self.upper_bounds[layer], value < self.lower_bounds[layer])\n activate = np.sum(outer, axis=0) > 0\n self._activate_table[layer] = np.logical_or(self._activate_table[layer], activate)\n nbc = self._activate_rate()\n return nbc\n\n\nclass KMultisectionNeuronCoverage(SuperNeuronActivateCoverage):\n \"\"\"\n Get the metric of 'k-multisection neuron coverage'. KMNC measures how thoroughly the given set of test inputs\n covers the range of neuron output values derived from the training dataset.\n\n Args:\n model (Model): The pre-trained model to be tested.\n train_dataset (numpy.ndarray): Training dataset used to determine the neurons' output boundaries.\n segmented_num (int): The number of segmented sections of neurons' output intervals. Default: 100.\n incremental (bool): Whether metrics will be calculated incrementally. Default: False.\n batch_size (int): The number of samples in a fuzz test batch. 
Default: 32.\n \"\"\"\n\n def __init__(self, model, train_dataset, segmented_num=100, incremental=False, batch_size=32):\n super(KMultisectionNeuronCoverage, self).__init__(model, train_dataset, incremental=incremental,\n batch_size=batch_size)\n self.segmented_num = check_int_positive('segmented_num', segmented_num)\n self.intervals = defaultdict(list)\n for keys in self.upper_bounds.keys():\n self.intervals[keys] = (self.upper_bounds[keys] - self.lower_bounds[keys]) / self.segmented_num\n\n def _init_k_multisection_table(self, data):\n \"\"\" Initialize the activation table.\"\"\"\n self._model.predict(Tensor(data))\n layer_out = _get_summary_tensor_data()\n activate_section_table = defaultdict()\n for layer, value in layer_out.items():\n activate_section_table[layer] = np.zeros((value.shape[1], self.segmented_num), bool)\n return activate_section_table\n\n def get_metrics(self, dataset):\n \"\"\"\n Get the metric of 'k-multisection neuron coverage'.\n\n Args:\n dataset (numpy.ndarray): Dataset used to calculate coverage metrics.\n\n Returns:\n float, the metric of 'k-multisection neuron coverage'.\n\n Examples:\n >>> kmnc = KMultisectionNeuronCoverage(model, train_dataset, segmented_num=100)\n >>> metrics = kmnc.get_metrics(test_data)\n \"\"\"\n\n dataset = check_numpy_param('dataset', dataset)\n if not self.incremental or not self._activate_table:\n self._activate_table = self._init_k_multisection_table(dataset[0:1])\n\n batches = math.ceil(dataset.shape[0] / self.batch_size)\n for i in range(batches):\n inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]\n self._model.predict(Tensor(inputs))\n layer_out = _get_summary_tensor_data()\n for layer, tensor in layer_out.items():\n value = tensor.asnumpy()\n value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))\n hits = np.floor((value - self.lower_bounds[layer]) / self.intervals[layer]).astype(int)\n hits = np.transpose(hits, [1, 0])\n for n in range(len(hits)):\n for sec in hits[n]:\n if sec >= self.segmented_num or sec < 0:\n continue\n self._activate_table[layer][n][sec] = True\n\n kmnc = self._activate_rate() / self.segmented_num\n return kmnc\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDP-Monitor test.\n\"\"\"\nimport pytest\nimport numpy as np\n\nimport mindspore.nn as nn\nimport mindspore.dataset as ds\nfrom mindspore.train import Model\nimport mindspore.context as context\n\nfrom mindarmour.privacy.diff_privacy import PrivacyMonitorFactory\nfrom mindarmour.utils.logger import LogUtil\n\nfrom tests.ut.python.utils.mock_net import Net\n\nLOGGER = LogUtil.get_instance()\nTAG = 'DP-Monitor Test'\n\n\ndef dataset_generator():\n batch_size = 16\n batches = 128\n\n data = np.random.random((batches * batch_size, 1, 32, 32)).astype(\n np.float32)\n label = np.random.randint(0, 10, batches * batch_size).astype(np.int32)\n for i in range(batches):\n yield data[i * batch_size: (i + 1) * batch_size], \\\n label[i * batch_size: (i + 1) * batch_size]\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n batch_size = 16\n epochs = 1\n rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = rdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor_gpu():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n batch_size = 16\n epochs = 1\n rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = rdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor_cpu():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n batch_size = 16\n epochs = 1\n rdp = 
PrivacyMonitorFactory.create(policy='rdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = rdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor_zcdp():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n batch_size = 16\n epochs = 1\n zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = zcdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor_zcdp_gpu():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n batch_size = 16\n epochs = 1\n zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = zcdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)\n\n\[email protected]\[email protected]_x86_cpu\[email protected]_card\[email protected]_mindarmour\ndef test_dp_monitor_zcdp_cpu():\n context.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n batch_size = 16\n epochs = 1\n zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000,\n batch_size=batch_size,\n initial_noise_multiplier=0.4,\n noise_decay_rate=6e-3)\n suggest_epoch = zcdp.max_epoch_suggest()\n LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',\n suggest_epoch)\n network = Net()\n net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction=\"mean\")\n net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)\n\n model = Model(network, net_loss, net_opt)\n\n LOGGER.info(TAG, \"============== Starting Training ==============\")\n ds1 = ds.GeneratorDataset(dataset_generator,\n [\"data\", \"label\"])\n model.train(epochs, ds1, callbacks=[zcdp], 
dataset_sink_mode=False)\n"
] | [
[
"numpy.sum",
"numpy.logical_or",
"numpy.transpose",
"numpy.zeros",
"numpy.any",
"numpy.floor",
"numpy.max",
"numpy.min",
"numpy.sort"
],
[
"numpy.random.random",
"numpy.random.randint"
]
] |
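The coverage metrics recorded in the row above (TKNC, SNAC, NBC, KMNC) all reduce to bookkeeping over per-neuron activation bounds. The following is a minimal NumPy sketch of the SNAC and NBC formulas quoted in the docstrings, SNAC = |UpperCornerNeuron|/|N| and NBC = (|UpperCornerNeuron| + |LowerCornerNeuron|)/(2*|N|), on synthetic activations; the array shapes and random data are illustrative, not MindArmour's API.

```python
import numpy as np

# Synthetic per-layer activations: rows are samples, columns are neurons.
rng = np.random.default_rng(0)
train_act = rng.normal(size=(1000, 8))        # stand-in for training-set activations
test_act = 1.5 * rng.normal(size=(200, 8))    # stand-in for test-set activations

upper = train_act.max(axis=0)   # per-neuron upper bound from the training set
lower = train_act.min(axis=0)   # per-neuron lower bound from the training set

# SNAC = |UpperCornerNeuron| / |N|: neurons whose test output ever exceeds the upper bound.
upper_corner = np.any(test_act > upper, axis=0)
snac = upper_corner.sum() / test_act.shape[1]

# NBC = (|UpperCornerNeuron| + |LowerCornerNeuron|) / (2 * |N|): either bound crossed.
lower_corner = np.any(test_act < lower, axis=0)
nbc = (upper_corner.sum() + lower_corner.sum()) / (2 * test_act.shape[1])

print(f"SNAC={snac:.3f} NBC={nbc:.3f}")
```

KMNC follows the same pattern but additionally bins each in-range activation into one of segmented_num intervals per neuron, so its table is 2-D per layer rather than a boolean vector.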
Exusial/jittor | [
"eca21d5bba5098bce4f492fa44908677b6e76588"
] | [
"python/jittor/test/test_argsort_op.py"
] | [
"# ***************************************************************\n# Copyright (c) 2021 Jittor. All Rights Reserved. \n# Maintainers: \n# Guoye Yang <[email protected]>\n# Dun Liang <[email protected]>. \n# \n# This file is subject to the terms and conditions defined in\n# file 'LICENSE.txt', which is part of this source code package.\n# ***************************************************************\nimport unittest\nimport jittor as jt\nimport numpy as np\nfrom jittor import compile_extern\nfrom .test_log import find_log_with_re\nif jt.has_cuda:\n from jittor.compile_extern import cublas_ops, cudnn_ops, cub_ops\nelse:\n cublas_ops = cudnn_ops = cub_ops = None\n\ndef check_argsort(shape, dim, descending = False):\n x = jt.random(shape)\n y, y_key = jt.argsort(x, dim=dim, descending=descending)\n v = []\n for i in range(len(shape)):\n if (i == dim):\n v.append(y)\n else:\n v.append(jt.index(shape, dim=i))\n yk = jt.reindex(x, v)\n yk_ = yk.data\n y_key_ = y_key.data\n x__ = x.data\n if descending:\n x__ = -x__\n yk__ = np.sort(x__, axis=dim)\n if descending:\n yk__ = -yk__\n assert np.allclose(y_key_, yk__)\n assert np.allclose(yk_, yk__)\n\ndef check_cub_argsort(shape, dim, descending = False):\n with jt.log_capture_scope(\n log_silent=1,\n log_v=0, log_vprefix=\"op.cc=100\"\n ) as raw_log:\n x = jt.random(shape)\n y, y_key = jt.argsort(x, dim=dim, descending=descending)\n v = []\n for i in range(len(shape)):\n if (i == dim):\n v.append(y)\n else:\n v.append(jt.index(shape, dim=i))\n yk = jt.reindex(x, v)\n yk_ = yk.data\n y_key_ = y_key.data\n logs = find_log_with_re(raw_log, \"(Jit op key (not )?found: \" + \"cub_argsort\" + \".*)\")\n assert len(logs)==1\n x__ = x.data\n if descending:\n x__ = -x__\n yk__ = np.sort(x__, axis=dim)\n if descending:\n yk__ = -yk__\n assert np.allclose(y_key_, yk__)\n assert np.allclose(yk_, yk__)\n\ndef check_backward(shape, dim, descending = False):\n x = jt.random(shape)\n y, y_key = jt.argsort(x, dim=dim, descending=descending)\n loss = (y_key * y_key).sum()\n gs = jt.grad(loss, x)\n assert np.allclose(x.data*2, gs.data)\n\nclass TestArgsortOp(unittest.TestCase):\n def test(self):\n check_argsort([5,5], 0, False)\n check_argsort([5,5], 0, True)\n check_argsort([5,5], 1, False)\n check_argsort([5,5], 1, True)\n check_argsort([12, 34, 56, 78], 1, True)\n check_argsort([12, 34, 56, 78], 3, True)\n check_argsort([12, 34, 56, 78], 2, False)\n check_argsort([12, 34, 56, 78], 0, False)\n\n def test_backward(self):\n check_backward([5,5], 0, False)\n check_backward([5,5], 0, True)\n check_backward([5,5], 1, False)\n check_backward([5,5], 1, True)\n check_backward([12, 34, 56, 78], 1, True)\n check_backward([12, 34, 56, 78], 3, True)\n check_backward([12, 34, 56, 78], 2, False)\n check_backward([12, 34, 56, 78], 0, False)\n\n def test_doc(self):\n assert \"Argsort Operator\" in jt.argsort.__doc__\n\n @unittest.skipIf(cub_ops==None, \"Not use cub, Skip\")\n @jt.flag_scope(use_cuda=1)\n def test_cub(self):\n check_cub_argsort([5,5], 0, False)\n check_cub_argsort([5,5], 0, True)\n check_cub_argsort([5,5], 1, False)\n check_cub_argsort([5,5], 1, True)\n check_cub_argsort([12, 34, 56, 78], 1, True)\n check_cub_argsort([12, 34, 56, 78], 3, True)\n check_cub_argsort([12, 34, 56, 78], 2, False)\n check_cub_argsort([12, 34, 56, 78], 0, False)\n\n @unittest.skipIf(cub_ops==None, \"Not use cub, Skip\")\n @jt.flag_scope(use_cuda=1)\n def test_cub_backward(self):\n check_backward([5,5], 0, False)\n check_backward([5,5], 0, True)\n check_backward([5,5], 1, False)\n 
check_backward([5,5], 1, True)\n check_backward([12, 34, 56, 78], 1, True)\n check_backward([12, 34, 56, 78], 3, True)\n check_backward([12, 34, 56, 78], 2, False)\n check_backward([12, 34, 56, 78], 0, False)\n\nif __name__ == \"__main__\":\n unittest.main()"
] | [
[
"numpy.sort",
"numpy.allclose"
]
] |
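The Jittor tests above validate argsort by reindexing the input with the returned indices and comparing against numpy.sort. The same identity can be checked in plain NumPy; this sketch (synthetic data, standard NumPy calls only) mirrors check_argsort, including the negate-sort-negate trick the test uses for descending order.

```python
import numpy as np

x = np.random.rand(5, 5)   # synthetic input, same shape as the small test cases
dim = 1

# Gathering x along `dim` with its argsort indices must reproduce np.sort.
idx = np.argsort(x, axis=dim)
gathered = np.take_along_axis(x, idx, axis=dim)
assert np.allclose(gathered, np.sort(x, axis=dim))

# Descending order via the sign trick used in check_argsort: sort -x, negate back.
desc_idx = np.argsort(-x, axis=dim)
desc = np.take_along_axis(x, desc_idx, axis=dim)
assert np.allclose(desc, -np.sort(-x, axis=dim))
print("argsort/reindex identity holds")
```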
6666ev/bert_seq2seq | [
"caa9b6c5629ae5783c733aebbbcf669d8ab5dde2"
] | [
"examples/roberta_coarsness_NER_CRF_train.py"
] | [
"## 粗粒度ner加crf层的例子\nimport torch\nfrom tqdm import tqdm\nimport unicodedata\nimport os\nimport time\nfrom torch.utils.data import Dataset, DataLoader\nfrom bert_seq2seq import Tokenizer, load_chinese_base_vocab\nfrom bert_seq2seq import load_bert\n\ndata_path = \"./state_dict/corase_train_update.txt\"\n\nvocab_path = \"./state_dict/roberta_wwm_vocab.txt\" # roberta模型字典的位置\nmodel_name = \"roberta\" # 选择模型名字\nmodel_path = \"./state_dict/roberta_wwm_pytorch_model.bin\" # roberta模型位置\nrecent_model_path = \"\" # 用于把已经训练好的模型继续训练\nmodel_save_path = \"./bert_粗粒度ner_crf.bin\"\nbatch_size = 4\nlr = 1e-5\n\nword2idx = load_chinese_base_vocab(vocab_path)\n\ntarget = [\"O\", \"B-LOC\", \"I-LOC\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\"]\n\ndef _is_punctuation(ch):\n \"\"\"标点符号类字符判断(全/半角均在此内)\n \"\"\"\n code = ord(ch)\n return 33 <= code <= 47 or \\\n 58 <= code <= 64 or \\\n 91 <= code <= 96 or \\\n 123 <= code <= 126 or \\\n unicodedata.category(ch).startswith('P')\n\ndef _cjk_punctuation():\n return u'\\uff02\\uff03\\uff04\\uff05\\uff06\\uff07\\uff08\\uff09\\uff0a\\uff0b\\uff0c\\uff0d\\uff0f\\uff1a\\uff1b\\uff1c\\uff1d\\uff1e\\uff20\\uff3b\\uff3c\\uff3d\\uff3e\\uff3f\\uff40\\uff5b\\uff5c\\uff5d\\uff5e\\uff5f\\uff60\\uff62\\uff63\\uff64\\u3000\\u3001\\u3003\\u3008\\u3009\\u300a\\u300b\\u300c\\u300d\\u300e\\u300f\\u3010\\u3011\\u3014\\u3015\\u3016\\u3017\\u3018\\u3019\\u301a\\u301b\\u301c\\u301d\\u301e\\u301f\\u3030\\u303e\\u303f\\u2013\\u2014\\u2018\\u2019\\u201b\\u201c\\u201d\\u201e\\u201f\\u2026\\u2027\\ufe4f\\ufe51\\ufe54\\xb7\\uff01\\uff1f\\uff61\\u3002'\n\ndef _is_cjk_character(ch):\n \"\"\"CJK类字符判断(包括中文字符也在此列)\n 参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n \"\"\"\n code = ord(ch)\n return 0x4E00 <= code <= 0x9FFF or \\\n 0x3400 <= code <= 0x4DBF or \\\n 0x20000 <= code <= 0x2A6DF or \\\n 0x2A700 <= code <= 0x2B73F or \\\n 0x2B740 <= code <= 0x2B81F or \\\n 0x2B820 <= code <= 0x2CEAF or \\\n 0xF900 <= code <= 0xFAFF or \\\n 0x2F800 <= code <= 0x2FA1F\n\ndef _is_control(ch):\n \"\"\"控制类字符判断\n \"\"\"\n return unicodedata.category(ch) in ('Cc', 'Cf')\n\ndef word_piece_tokenize(word):\n \"\"\"word内分成subword\n \"\"\"\n if word in word2idx:\n return [word]\n tokens = []\n start, stop = 0, 0\n while start < len(word):\n stop = len(word)\n while stop > start:\n sub = word[start:stop]\n if start > 0:\n sub = '##' + sub\n if sub in word2idx:\n break\n stop -= 1\n if start == stop:\n stop += 1\n tokens.append(sub)\n start = stop\n\n return tokens\n\ndef read_corpus(data_path):\n \"\"\"\n 读原始数据\n \"\"\"\n sents_src = []\n sents_tgt = []\n\n with open(data_path, encoding=\"utf-8\") as f:\n lines = f.readlines()\n row = \"\"\n t = []\n for line in lines:\n if line == \"\\n\":\n \n if len(row) < 300: \n sents_src.append(row)\n sents_tgt.append(t)\n row = \"\"\n t = []\n continue\n line = line.split(\" \")\n row = row + line[0]\n t.append(line[1].strip(\"\\n\"))\n\n return sents_src, sents_tgt\n\n## 自定义dataset\nclass NERDataset(Dataset):\n \"\"\"\n 针对特定数据集,定义一个相关的取数据的方式\n \"\"\"\n def __init__(self, sents_src, sents_tgt) :\n ## 一般init函数是加载所有数据\n super(NERDataset, self).__init__()\n # 读原始数据\n # self.sents_src, self.sents_tgt = read_corpus(poem_corpus_dir)\n self.sents_src = sents_src\n self.sents_tgt = sents_tgt\n \n self.idx2word = {k: v for v, k in word2idx.items()}\n self.tokenizer = Tokenizer(word2idx)\n\n def __getitem__(self, i):\n ## 得到单个数据\n # print(i)\n src = self.sents_src[i]\n tgt = self.sents_tgt[i]\n tgt = [\"O\"] + tgt + [\"O\"]\n tgt = [target.index(i) for i in tgt 
]\n token_ids, token_type_ids = self.tokenizer.encode(src)\n if len(token_ids) != len(tgt):\n print(\"not equal\")\n os._exit(0)\n output = {\n \"token_ids\": token_ids,\n \"token_type_ids\": token_type_ids,\n \"target_id\": tgt\n }\n return output\n\n def __len__(self):\n return len(self.sents_src)\n \ndef collate_fn(batch):\n \"\"\"\n 动态padding, batch为一部分sample\n \"\"\"\n\n def padding(indice, max_length, pad_idx=0):\n \"\"\"\n pad 函数\n \"\"\"\n pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]\n return torch.tensor(pad_indice)\n\n\n token_ids = [data[\"token_ids\"] for data in batch]\n \n max_length = max([len(t) for t in token_ids])\n token_type_ids = [data[\"token_type_ids\"] for data in batch]\n target_ids = [data[\"target_id\"] for data in batch]\n \n token_ids_padded = padding(token_ids, max_length)\n token_type_ids_padded = padding(token_type_ids, max_length)\n target_ids_padded = padding(target_ids, max_length)\n\n return token_ids_padded, token_type_ids_padded, target_ids_padded\n\ndef viterbi_decode(nodes, trans):\n \"\"\"\n 维特比算法 解码\n nodes: (seq_len, target_size)\n trans: (target_size, target_size)\n \"\"\"\n scores = nodes[0]\n scores[1:] -= 100000 # 刚开始标签肯定是\"O\"\n target_size = nodes.shape[1]\n seq_len = nodes.shape[0]\n labels = torch.arange(0, target_size).view(1, -1)\n path = labels\n for l in range(1, seq_len):\n scores = scores.view(-1, 1)\n M = scores + trans + nodes[l].view(1, -1)\n scores, ids = M.max(0)\n path = torch.cat((path[:, ids], labels), dim=0)\n # print(scores)\n # print(scores)\n return path[:, scores.argmax()]\n\ndef ner_print(model, test_data, device=\"cpu\"):\n model.eval()\n idxtword = {v: k for k, v in word2idx.items()}\n\n tokenier = Tokenizer(word2idx)\n trans = model.state_dict()[\"crf_layer.trans\"]\n for text in test_data:\n decode = []\n text_encode, text_ids = tokenier.encode(text)\n \n text_tensor = torch.tensor(text_encode, device=device).view(1, -1)\n out = model(text_tensor).squeeze(0) # 其实是nodes\n labels = viterbi_decode(out, trans)\n starting = False\n for l in labels:\n if l > 0:\n label = target[l.item()]\n if label[0] == \"B\":\n decode.append(label[2: ])\n starting = True\n elif starting:\n decode.append(label[2: ])\n else: \n starting = False\n decode.append(\"O\")\n else :\n decode.append(\"O\")\n flag = 0\n\n res = {}\n text_decode = [idxtword[i] for i in text_encode]\n for index, each_entity in enumerate(decode):\n if each_entity != \"O\":\n if flag != each_entity:\n # cur_text = \"\".join([text[t] for t in mapping[index]])\n cur_text = text_decode[index]\n if each_entity in res.keys():\n res[each_entity].append(cur_text)\n else :\n res[each_entity] = [cur_text]\n flag = each_entity\n elif flag == each_entity:\n res[each_entity][-1] += text_decode[index]\n # res[each_entity][-1] += \"\".join([text[t] for t in mapping[index]])\n else :\n flag = 0\n print(res)\n\n\nclass Trainer:\n def __init__(self):\n # 加载数据\n \n self.sents_src, self.sents_tgt = read_corpus(data_path)\n\n self.tokenier = Tokenizer(word2idx)\n # 判断是否有可用GPU\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"device: \" + str(self.device))\n # 定义模型\n self.bert_model = load_bert(word2idx, model_name=model_name, model_class=\"sequence_labeling_crf\", target_size=len(target))\n ## 加载预训练的模型参数~\n self.bert_model.load_pretrain_params(model_path)\n # 将模型发送到计算设备(GPU或CPU)\n self.bert_model.set_device(self.device)\n # 声明需要优化的参数\n self.optim_parameters = list(self.bert_model.parameters())\n 
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)\n # 声明自定义的数据加载器\n dataset = NERDataset(self.sents_src, self.sents_tgt)\n self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n\n def train(self, epoch):\n # 一个epoch的训练\n self.bert_model.train()\n self.iteration(epoch, dataloader=self.dataloader, train=True)\n \n def save(self, save_path):\n \"\"\"\n 保存模型\n \"\"\"\n self.bert_model.save_all_params(save_path)\n print(\"{} saved!\".format(save_path))\n\n def iteration(self, epoch, dataloader, train=True):\n total_loss = 0\n start_time = time.time() ## 得到当前时间\n step = 0\n for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):\n # print(target_ids.shape)\n step += 1\n if step % 500 == 0:\n test_data = [\"日寇在京掠夺文物详情。\", \"以书结缘,把欧美,港台流行的食品类食谱汇集一堂。\", \"明天天津下雨,不知道主任还能不能来学校吃个饭。\"]\n ner_print(self.bert_model, test_data, device=self.device)\n self.bert_model.train()\n\n # 因为传入了target标签,因此会计算loss并且返回\n predictions, loss = self.bert_model(token_ids,\n labels=target_ids \n )\n # 反向传播\n if train:\n # 清空之前的梯度\n self.optimizer.zero_grad()\n # 反向传播, 获取新的梯度\n loss.backward()\n # 用获取的梯度更新模型参数\n self.optimizer.step()\n\n # 为计算当前epoch的平均loss\n total_loss += loss.item()\n \n end_time = time.time()\n spend_time = end_time - start_time\n # 打印训练信息\n print(\"epoch is \" + str(epoch)+\". loss is \" + str(total_loss) + \". spend time is \"+ str(spend_time))\n # 保存模型\n self.save(model_save_path)\n\nif __name__ == '__main__':\n \n trainer = Trainer()\n train_epoches = 25\n for epoch in range(train_epoches):\n # 训练一个epoch\n trainer.train(epoch)\n\n # with open(\"./state_dict/corase_train_update.txt\", \"a+\") as f:\n # with open(\"./corpus/粗粒度NER/人民日报ner数据.txt\", \"r\", encoding=\"utf-8\") as f1 :\n # lines = f1.readlines()\n # start = 1\n # string = \"\"\n # label = \"\"\n # for line in lines:\n # if line == \"\\n\":\n # f.write(\"\\n\")\n # continue\n # line = line.strip(\"\\n\")\n # line = line.split(\" \")\n # if _is_punctuation(line[0]) or _is_cjk_character(line[0]):\n # if string != \"\":\n # string = string.lower()\n # tokens = word_piece_tokenize(string) # 子词\n # for t in tokens:\n # if \"##\" in t:\n # f.write(t[2:] + \" \" + label + \"\\n\")\n # else :\n # f.write(t + \" \" + label + \"\\n\")\n # # f.write(string + \" \" + label + \"\\n\")\n # string = \"\"\n # label = \"\"\n # f.write(line[0] + \" \" + line[1] + \"\\n\")\n # else :\n # string += line[0]\n # label = line[1]"
] | [
[
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.optim.Adam",
"torch.arange",
"torch.cuda.is_available",
"torch.cat"
]
] |
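viterbi_decode in the NER script above runs the standard Viterbi recursion over per-token emission scores and a tag-transition matrix, rebuilding the candidate paths at every step rather than keeping backpointers. A NumPy sketch of the same recursion, with synthetic scores and tag counts, may make the torch version easier to follow; the -100000 offset reproduces the original's trick of forcing the first tag to be "O" (index 0).

```python
import numpy as np

def viterbi_decode_np(nodes, trans):
    """NumPy sketch of viterbi_decode above: nodes is (seq_len, n_tags)
    emission scores, trans is (n_tags, n_tags) transition scores."""
    scores = nodes[0].copy()
    scores[1:] -= 100000                      # force the first tag to "O", as above
    n_tags = nodes.shape[1]
    paths = [[t] for t in range(n_tags)]      # best path ending in each tag
    for step in range(1, nodes.shape[0]):
        # M[i, j]: best score ending in tag i, plus trans i->j, plus emission of j
        M = scores[:, None] + trans + nodes[step][None, :]
        best_prev = M.argmax(axis=0)
        scores = M.max(axis=0)
        paths = [paths[best_prev[j]] + [j] for j in range(n_tags)]
    return paths[int(scores.argmax())]

rng = np.random.default_rng(1)
print(viterbi_decode_np(rng.random((4, 3)), rng.random((3, 3))))
```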
panaali/qubovert | [
"d5ea46349d2a058954fb2cb06f559c0d3fb382c5"
] | [
"tests/problems/np/test_graphpartitioning.py"
] | [
"# Copyright 2020 Joseph T. Iosue\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nContains tests for the GraphPartitioning class.\n\"\"\"\n\nfrom qubovert.problems import GraphPartitioning\nfrom qubovert.utils import (\n solve_qubo_bruteforce, solve_quso_bruteforce,\n solve_pubo_bruteforce, solve_puso_bruteforce\n)\nfrom numpy import allclose\n\n\nedges = {(\"a\", \"b\"), (\"a\", \"c\"), (\"c\", \"d\"),\n (\"b\", \"c\"), (\"e\", \"f\"), (\"d\", \"e\")}\nproblem = GraphPartitioning(edges)\nsolutions = (\n ({\"a\", \"b\", \"c\"}, {\"d\", \"e\", \"f\"}),\n ({\"d\", \"e\", \"f\"}, {\"a\", \"b\", \"c\"})\n)\n\nproblem_weighted = GraphPartitioning({(0, 1): 1, (1, 2): 3, (0, 3): 1})\nsolutions_weighted = (\n ({0, 3}, {1, 2}),\n ({1, 2}, {0, 3})\n)\n\n\ndef test_graphpartitioning_str():\n\n assert eval(str(problem)) == problem\n\n\ndef test_graphpartitioning_properties():\n\n assert problem.E == edges\n problem.V\n problem.degree\n problem.weights\n\n\ndef test_graphpartitioning_bruteforce():\n\n assert problem.solve_bruteforce() in solutions\n assert (\n problem.solve_bruteforce(all_solutions=True) in\n (list(solutions), list(reversed(solutions)))\n )\n\n\n# QUBO\n\ndef test_graphpartitioning_qubo_solve():\n\n e, sol = solve_qubo_bruteforce(problem.to_qubo())\n solution = problem.convert_solution(sol)\n\n assert solution in solutions\n assert problem.is_solution_valid(solution)\n assert problem.is_solution_valid(sol)\n assert allclose(e, 1)\n\n e, sol = solve_qubo_bruteforce(problem.to_qubo(10))\n solution = problem.convert_solution(sol)\n\n assert solution in solutions\n assert problem.is_solution_valid(solution)\n assert problem.is_solution_valid(sol)\n assert allclose(e, 1)\n\n e, sol = solve_qubo_bruteforce(problem_weighted.to_qubo())\n solution = problem_weighted.convert_solution(sol)\n assert solution == problem_weighted.convert_solution(\n [sol[i] for i in range(problem_weighted.num_binary_variables)]\n )\n\n assert solution in solutions_weighted\n assert problem_weighted.is_solution_valid(solution)\n assert problem_weighted.is_solution_valid(sol)\n assert allclose(e, 1)\n\n\ndef test_graphpartitioning_qubo_numvars():\n\n Q = problem.to_qubo()\n assert (\n len(set(y for x in Q for y in x)) ==\n problem.num_binary_variables ==\n Q.num_binary_variables\n )\n\n\n# quso\n\ndef test_graphpartitioning_quso_solve():\n\n e, sol = solve_quso_bruteforce(problem.to_quso())\n solution = problem.convert_solution(sol)\n\n assert solution in solutions\n assert problem.is_solution_valid(solution)\n assert problem.is_solution_valid(sol)\n assert allclose(e, 1)\n\n e, sol = solve_quso_bruteforce(problem_weighted.to_quso())\n solution = problem_weighted.convert_solution(sol)\n\n assert solution in solutions_weighted\n assert problem_weighted.is_solution_valid(solution)\n assert problem_weighted.is_solution_valid(sol)\n assert allclose(e, 1)\n\n\ndef test_graphpartitioning_quso_numvars():\n\n L = problem.to_quso()\n assert L.num_binary_variables == problem.num_binary_variables\n\n\n# PUBO\n\ndef 
test_graphpartitioning_pubo_solve():\n\n e, sol = solve_pubo_bruteforce(problem.to_pubo())\n solution = problem.convert_solution(sol)\n\n assert solution in solutions\n assert problem.is_solution_valid(solution)\n assert problem.is_solution_valid(sol)\n assert allclose(e, 1)\n\n e, sol = solve_pubo_bruteforce(problem_weighted.to_pubo())\n solution = problem_weighted.convert_solution(sol)\n\n assert solution in solutions_weighted\n assert problem_weighted.is_solution_valid(solution)\n assert problem_weighted.is_solution_valid(sol)\n assert allclose(e, 1)\n\n\n# puso\n\ndef test_graphpartitioning_puso_solve():\n\n e, sol = solve_puso_bruteforce(problem.to_puso())\n solution = problem.convert_solution(sol)\n\n assert solution in solutions\n assert problem.is_solution_valid(solution)\n assert problem.is_solution_valid(sol)\n assert allclose(e, 1)\n\n e, sol = solve_puso_bruteforce(problem_weighted.to_puso())\n solution = problem_weighted.convert_solution(sol)\n\n assert solution in solutions_weighted\n assert problem_weighted.is_solution_valid(solution)\n assert problem_weighted.is_solution_valid(sol)\n assert allclose(e, 1)\n"
] | [
[
"numpy.allclose"
]
] |
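The tests above exercise the full qubovert round trip: build a GraphPartitioning instance, lower it to a QUBO, brute-force the QUBO, and lift the bitstring back to a vertex partition. A condensed usage sketch with the same toy graph follows; the expected outputs in the comments come straight from the assertions in the tests.

```python
from qubovert.problems import GraphPartitioning
from qubovert.utils import solve_qubo_bruteforce

# Same toy instance as the tests above.
edges = {("a", "b"), ("a", "c"), ("c", "d"), ("b", "c"), ("e", "f"), ("d", "e")}
problem = GraphPartitioning(edges)

# Lower to a QUBO, brute-force it, then lift the bitstring back to two vertex sets.
energy, qubo_solution = solve_qubo_bruteforce(problem.to_qubo())
partition = problem.convert_solution(qubo_solution)

print(partition)                             # ({'a', 'b', 'c'}, {'d', 'e', 'f'}) or the swap
print(problem.is_solution_valid(partition))  # True: both halves have three vertices
print(energy)                                # 1: exactly one edge, ("c", "d"), crosses the cut
```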
JackKelly/Geocode | [
"b3cc89c7467384e41c5be6bcd80b36271cfc252c"
] | [
"geocode/latlons2llsoa.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nLoad a list of lat/lons from a CSV file and reverse-geocode them to LLSOA.\n\n- Jamie Taylor <[email protected]>\n- First Authored: 2020-04-16\n\"\"\"\n\nimport sys\nimport os\nimport argparse\nimport time as TIME\nimport pandas as pd\n\nfrom geocode import Geocoder, query_yes_no\n\ndef parse_options():\n \"\"\"Parse command line options.\"\"\"\n parser = argparse.ArgumentParser(description=(\"This is a command line interface (CLI) for \"\n \"the latlons2llsoa.py module\"),\n epilog=\"Jamie Taylor, 2020-04-16\")\n parser.add_argument(\"-f\", \"--input-file\", dest=\"infile\", action=\"store\", type=str,\n required=True, metavar=\"</path/to/file>\",\n help=\"Specify a CSV file containing a list of latitudes and longitudes to \"\n \"be reverse-geocoded. The file must contain the columns 'latitude' \"\n \"and 'longitude' (it can contain others, all of which will be kept).\")\n parser.add_argument(\"-o\", \"--output-file\", dest=\"outfile\", action=\"store\", type=str,\n required=True, metavar=\"</path/to/file>\", help=\"Specify an output file.\")\n parser.add_argument(\"--datazones\", dest=\"datazones\", action=\"store_true\",\n required=False, help=\"Specify to use Data Zones in Scotland.\")\n options = parser.parse_args()\n if not os.path.isfile(options.infile):\n raise Exception(f\"The input file '{options.infile}' does not exist.\")\n if os.path.isfile(options.outfile):\n check = query_yes_no(f\"The outfile '{options.outfile}' already exists, are you sure you \"\n \"wish to overwrite?\", \"no\")\n if not check:\n print(\"Quitting...\")\n sys.exit(0)\n return options\n\ndef main():\n timerstart = TIME.time()\n options = parse_options()\n with open(options.infile, \"r\") as fid:\n df = pd.read_csv(fid)\n with Geocoder(progress_bar=True) as geo:\n # import pdb; pdb.set_trace()\n df[\"llsoacd\"] = geo.reverse_geocode_llsoa(df[[\"latitude\", \"longitude\"]].to_numpy(),\n options.datazones)\n df.to_csv(options.outfile, index=False)\n print(f\"Finished, time taken: {TIME.time() - timerstart} seconds\")\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv"
]
] |
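latlons2llsoa.py is a thin CLI around Geocoder.reverse_geocode_llsoa: it needs a CSV with latitude and longitude columns and appends an llsoacd column. A minimal programmatic sketch of the same call path is below; the sample coordinates are made up, and the second positional argument is the Scottish Data Zones flag, exactly as the script passes options.datazones.

```python
import pandas as pd
from geocode import Geocoder

# Made-up coordinates; any extra columns in a real input CSV are preserved.
df = pd.DataFrame({"latitude": [53.381, 51.507], "longitude": [-1.486, -0.128]})

# Same call path as main() above: reverse-geocode an (N, 2) array of lat/lons
# to LLSOA codes; the second positional argument enables Data Zones in Scotland.
with Geocoder(progress_bar=True) as geo:
    df["llsoacd"] = geo.reverse_geocode_llsoa(
        df[["latitude", "longitude"]].to_numpy(), False
    )
print(df)
```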
Ishticode/kornia | [
"974abb43ec72d12dbd244a2fb247bbbab8498de0"
] | [
"kornia/enhance/histogram.py"
] | [
"from typing import Optional, Tuple\n\nimport torch\n\n\ndef marginal_pdf(\n values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, epsilon: float = 1e-10\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Calculate the marginal probability distribution function of the input tensor based on the number of\n histogram bins.\n\n Args:\n values: shape [BxNx1].\n bins: shape [NUM_BINS].\n sigma: shape [1], gaussian smoothing factor.\n epsilon: scalar, for numerical stability.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]:\n - torch.Tensor: shape [BxN].\n - torch.Tensor: shape [BxNxNUM_BINS].\n \"\"\"\n\n if not isinstance(values, torch.Tensor):\n raise TypeError(f\"Input values type is not a torch.Tensor. Got {type(values)}\")\n\n if not isinstance(bins, torch.Tensor):\n raise TypeError(f\"Input bins type is not a torch.Tensor. Got {type(bins)}\")\n\n if not isinstance(sigma, torch.Tensor):\n raise TypeError(f\"Input sigma type is not a torch.Tensor. Got {type(sigma)}\")\n\n if not values.dim() == 3:\n raise ValueError(\"Input values must be a of the shape BxNx1.\" \" Got {}\".format(values.shape))\n\n if not bins.dim() == 1:\n raise ValueError(\"Input bins must be a of the shape NUM_BINS\" \" Got {}\".format(bins.shape))\n\n if not sigma.dim() == 0:\n raise ValueError(\"Input sigma must be a of the shape 1\" \" Got {}\".format(sigma.shape))\n\n residuals = values - bins.unsqueeze(0).unsqueeze(0)\n kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))\n\n pdf = torch.mean(kernel_values, dim=1)\n normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon\n pdf = pdf / normalization\n\n return pdf, kernel_values\n\n\ndef joint_pdf(kernel_values1: torch.Tensor, kernel_values2: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:\n \"\"\"Calculate the joint probability distribution function of the input tensors based on the number of histogram\n bins.\n\n Args:\n kernel_values1: shape [BxNxNUM_BINS].\n kernel_values2: shape [BxNxNUM_BINS].\n epsilon: scalar, for numerical stability.\n\n Returns:\n shape [BxNUM_BINSxNUM_BINS].\n \"\"\"\n\n if not isinstance(kernel_values1, torch.Tensor):\n raise TypeError(f\"Input kernel_values1 type is not a torch.Tensor. Got {type(kernel_values1)}\")\n\n if not isinstance(kernel_values2, torch.Tensor):\n raise TypeError(f\"Input kernel_values2 type is not a torch.Tensor. 
Got {type(kernel_values2)}\")\n\n if not kernel_values1.dim() == 3:\n raise ValueError(\"Input kernel_values1 must be a of the shape BxN.\" \" Got {}\".format(kernel_values1.shape))\n\n if not kernel_values2.dim() == 3:\n raise ValueError(\"Input kernel_values2 must be a of the shape BxN.\" \" Got {}\".format(kernel_values2.shape))\n\n if kernel_values1.shape != kernel_values2.shape:\n raise ValueError(\n \"Inputs kernel_values1 and kernel_values2 must have the same shape.\"\n \" Got {} and {}\".format(kernel_values1.shape, kernel_values2.shape)\n )\n\n joint_kernel_values = torch.matmul(kernel_values1.transpose(1, 2), kernel_values2)\n normalization = torch.sum(joint_kernel_values, dim=(1, 2)).view(-1, 1, 1) + epsilon\n pdf = joint_kernel_values / normalization\n\n return pdf\n\n\ndef histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:\n \"\"\"Estimate the histogram of the input tensor.\n\n The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.\n\n Args:\n x: Input tensor to compute the histogram with shape :math:`(B, D)`.\n bins: The number of bins to use for the histogram :math:`(N_{bins})`.\n bandwidth: Gaussian smoothing factor with shape [1].\n epsilon: A scalar, for numerical stability.\n\n Returns:\n Computed histogram of shape :math:`(B, N_{bins})`.\n\n Examples:\n >>> x = torch.rand(1, 10)\n >>> bins = torch.linspace(0, 255, 128)\n >>> hist = histogram(x, bins, bandwidth=torch.tensor(0.9))\n >>> hist.shape\n torch.Size([1, 128])\n \"\"\"\n\n pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)\n\n return pdf\n\n\ndef histogram2d(\n x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10\n) -> torch.Tensor:\n \"\"\"Estimate the 2d histogram of the input tensors.\n\n The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.\n\n Args:\n x1: Input tensor to compute the histogram with shape :math:`(B, D1)`.\n x2: Input tensor to compute the histogram with shape :math:`(B, D2)`.\n bins: The number of bins to use for the histogram :math:`(N_{bins})`.\n bandwidth: Gaussian smoothing factor with shape [1].\n epsilon: A scalar, for numerical stability. Default: 1e-10.\n\n Returns:\n Computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.\n\n Examples:\n >>> x1 = torch.rand(2, 32)\n >>> x2 = torch.rand(2, 32)\n >>> bins = torch.linspace(0, 255, 128)\n >>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))\n >>> hist.shape\n torch.Size([2, 128, 128])\n \"\"\"\n\n _, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)\n _, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)\n\n pdf = joint_pdf(kernel_values1, kernel_values2)\n\n return pdf\n\n\ndef image_histogram2d(\n image: torch.Tensor,\n min: float = 0.0,\n max: float = 255.0,\n n_bins: int = 256,\n bandwidth: Optional[float] = None,\n centers: Optional[torch.Tensor] = None,\n return_pdf: bool = False,\n kernel: str = \"triangular\",\n eps: float = 1e-10,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Estimate the histogram of the input image(s).\n\n The calculation uses triangular kernel density estimation.\n\n Args:\n image: Input tensor to compute the histogram with shape\n :math:`(H, W)`, :math:`(C, H, W)` or :math:`(B, C, H, W)`.\n min: Lower end of the interval (inclusive).\n max: Upper end of the interval (inclusive). 
Ignored when\n :attr:`centers` is specified.\n n_bins: The number of histogram bins. Ignored when\n :attr:`centers` is specified.\n bandwidth: Smoothing factor. If not specified or equal to -1,\n :math:`(bandwidth = (max - min) / n_bins)`.\n centers: Centers of the bins with shape :math:`(n_bins,)`.\n If not specified or empty, it is calculated as centers of\n equal width bins of [min, max] range.\n return_pdf: If True, also return probability densities for\n each bin.\n kernel: kernel to perform kernel density estimation\n ``(`triangular`, `gaussian`, `uniform`, `epanechnikov`)``.\n\n Returns:\n Computed histogram of shape :math:`(bins)`, :math:`(C, bins)`,\n :math:`(B, C, bins)`.\n Computed probability densities of shape :math:`(bins)`, :math:`(C, bins)`,\n :math:`(B, C, bins)`, if return_pdf is ``True``. Tensor of zeros with shape\n of the histogram otherwise.\n \"\"\"\n if image is not None and not isinstance(image, torch.Tensor):\n raise TypeError(f\"Input image type is not a torch.Tensor. Got {type(image)}.\")\n\n if centers is not None and not isinstance(centers, torch.Tensor):\n raise TypeError(f\"Bins' centers type is not a torch.Tensor. Got {type(centers)}.\")\n\n if centers is not None and len(centers.shape) > 0 and centers.dim() != 1:\n raise ValueError(f\"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.\")\n\n if not isinstance(min, float):\n raise TypeError(f'Type of lower end of the range is not a float. Got {type(min)}.')\n\n if not isinstance(max, float):\n raise TypeError(f\"Type of upper end of the range is not a float. Got {type(min)}.\")\n\n if not isinstance(n_bins, int):\n raise TypeError(f\"Type of number of bins is not an int. Got {type(n_bins)}.\")\n\n if bandwidth is not None and not isinstance(bandwidth, float):\n raise TypeError(f\"Bandwidth type is not a float. Got {type(bandwidth)}.\")\n\n if not isinstance(return_pdf, bool):\n raise TypeError(f\"Return_pdf type is not a bool. Got {type(return_pdf)}.\")\n\n if bandwidth is None:\n bandwidth = (max - min) / n_bins\n\n if centers is None:\n centers = min + bandwidth * (torch.arange(n_bins, device=image.device, dtype=image.dtype) + 0.5)\n centers = centers.reshape(-1, 1, 1, 1, 1)\n\n u = torch.abs(image.unsqueeze(0) - centers) / bandwidth\n\n if kernel == \"gaussian\":\n kernel_values = torch.exp(-0.5 * u ** 2)\n elif kernel in (\"triangular\", \"uniform\", \"epanechnikov\",):\n # compute the mask and cast to floating point\n mask = (u <= 1).to(u.dtype)\n if kernel == \"triangular\":\n kernel_values = (1. - u) * mask\n elif kernel == \"uniform\":\n kernel_values = torch.ones_like(u) * mask\n else: # kernel == \"epanechnikov\"\n kernel_values = (1. - u ** 2) * mask\n else:\n raise ValueError(f\"Kernel must be 'triangular', 'gaussian', \" f\"'uniform' or 'epanechnikov'. Got {kernel}.\")\n\n hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)\n\n if return_pdf:\n normalization = torch.sum(hist, dim=-1, keepdim=True) + eps\n pdf = hist / normalization\n if image.dim() == 2:\n hist = hist.squeeze()\n pdf = pdf.squeeze()\n elif image.dim() == 3:\n hist = hist.squeeze(0)\n pdf = pdf.squeeze(0)\n return hist, pdf\n\n if image.dim() == 2:\n hist = hist.squeeze()\n elif image.dim() == 3:\n hist = hist.squeeze(0)\n\n return hist, torch.zeros_like(hist)\n"
] | [
[
"torch.sum",
"torch.ones_like",
"torch.zeros_like",
"torch.exp",
"torch.arange",
"torch.mean"
]
] |
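The kornia functions above build differentiable histograms by kernel density estimation: each sample spreads a Gaussian bump over nearby bin centers, so the result is a smooth, normalized pdf rather than hard counts. A short usage sketch following the shapes in the docstring examples; the import path assumes this module's location in the repo snapshot.

```python
import torch
from kornia.enhance.histogram import histogram, histogram2d

x1 = torch.rand(2, 32)
x2 = torch.rand(2, 32)
bins = torch.linspace(0, 255, 128)   # bin centers
bandwidth = torch.tensor(0.9)        # Gaussian smoothing factor

hist = histogram(x1, bins, bandwidth)         # (2, 128) marginal pdf per batch item
joint = histogram2d(x1, x2, bins, bandwidth)  # (2, 128, 128) joint pdf

print(hist.shape, joint.shape)
print(hist.sum(dim=-1))  # ~= 1 for each batch element: the histogram is normalized
```

Because every operation is a smooth function of the inputs, these histograms can sit inside a loss (e.g., for mutual-information-style objectives) and still backpropagate, which is the point of the KDE formulation over torch.histc.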
Rosna/P4ML-UI | [
"edf0dd830588f03b197e4d6532830a5aedd88424"
] | [
"spider/featurization/load_subclip_audio.py"
] | [
"import argparse\nimport librosa\nimport numpy as np\n\ndef make_subclips(audio, sr, clip_size, pad=True):\n # Given a list of audio files and corresponding sample rates,\n # return a 2D list of subclips, each of size clip_size\n # Optional padding takes care of audio files shorter than clip size\n clips = []\n for idx, a in enumerate(audio):\n\n # Size of a single clip in samples\n step = int(sr[idx] * clip_size)\n\n # Optional padding for short clips\n overhang = len(a) % step\n if overhang != 0 and pad:\n a = np.concatenate([a, np.zeros(step - overhang)])\n\n subclips = []\n for start in range(0, len(a), step):\n\n end = start + step\n if end > len(a):\n break\n\n subclips.append(a[start : end])\n\n return subclips\n\ndef main(audio_file, clip_size):\n\n # In python 2.7, librosa.load does not correctly handle 24-bit wav files.\n # This is resolved in python 3.x\n # \n # If the sr parameter is set to None, loads the actual sampling rate\n # from the audio file. Otherwise, will load the audio file and resample\n # it to the given sample rate. This is good if you want all audio at the\n # same sample rate, but can be slow. Default is 22050 Hz.\n audio, sr = librosa.load(audio_file, sr=None)\n\n # We just have one audio file here, but this should work for any number\n audio_subclips = make_subclips([audio], [sr], 1.0)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--audio_file', type=str, required=True)\n parser.add_argument('--clip_size', type=float, default=0)\n args = parser.parse_args()\n main(args.audio_file, args.clip_size)\n"
] | [
[
"numpy.zeros"
]
] |
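make_subclips chops each signal into fixed-length windows, padding short tails with zeros so every input yields whole clips. A quick sanity check of the clip arithmetic on synthetic signals; it assumes the corrected make_subclips above is importable from load_subclip_audio.

```python
import numpy as np
from load_subclip_audio import make_subclips  # the (fixed) function above

sr = 8000
audio = [np.random.randn(int(2.5 * sr)),   # 2.5 s: pads up to 3 whole 1 s clips
         np.random.randn(int(0.4 * sr))]   # 0.4 s: pads up to 1 whole 1 s clip

clips = make_subclips(audio, [sr, sr], clip_size=1.0, pad=True)

print(len(clips))               # 2: one list of subclips per input signal
print([len(c) for c in clips])  # [3, 1]
print(clips[0][0].shape)        # (8000,): each subclip is exactly clip_size seconds
```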
Johnzhjw/CIT2FR-FL-NAS | [
"53e93075ff1834ab817ad6359025ddafd20e6ef4"
] | [
"run_manager.py"
] | [
"# Once for All: Train One Network and Specialize it for Efficient Deployment\n# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han\n# International Conference on Learning Representations (ICLR), 2020.\n\nimport os\nimport time\nimport json\nimport math\nfrom tqdm import tqdm\n\nimport numpy as np\n\nimport copy\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torchvision\n\n# from imagenet_codebase.utils import *\nfrom ofa.imagenet_codebase.utils import count_parameters, count_net_flops, measure_net_latency, \\\n cross_entropy_loss_with_soft_target, cross_entropy_with_label_smoothing\nfrom ofa.utils import AverageMeter, accuracy\n\n\nclass RunConfig:\n\n def __init__(self, n_epochs, init_lr, lr_schedule_type, lr_schedule_param,\n dataset, train_batch_size, test_batch_size, valid_size,\n opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,\n mixup_alpha,\n model_init, validation_frequency, print_frequency):\n self.n_epochs = n_epochs\n self.init_lr = init_lr\n self.lr_schedule_type = lr_schedule_type\n self.lr_schedule_param = lr_schedule_param\n\n self.dataset = dataset\n self.train_batch_size = train_batch_size\n self.test_batch_size = test_batch_size\n self.valid_size = valid_size\n\n self.opt_type = opt_type\n self.opt_param = opt_param\n self.weight_decay = weight_decay\n self.label_smoothing = label_smoothing\n self.no_decay_keys = no_decay_keys\n\n self.mixup_alpha = mixup_alpha\n\n self.model_init = model_init\n self.validation_frequency = validation_frequency\n self.print_frequency = print_frequency\n\n @property\n def config(self):\n config = {}\n for key in self.__dict__:\n if not key.startswith('_'):\n config[key] = self.__dict__[key]\n return config\n\n def copy(self):\n return RunConfig(**self.config)\n\n \"\"\" learning rate \"\"\"\n\n def calc_learning_rate(self, epoch, batch=0, nBatch=None):\n if self.lr_schedule_type == 'cosine':\n T_total = self.n_epochs * nBatch\n T_cur = epoch * nBatch + batch\n lr = 0.5 * self.init_lr * (1 + math.cos(math.pi * T_cur / T_total))\n elif self.lr_schedule_type is None:\n lr = self.init_lr\n else:\n raise ValueError('do not support: %s' % self.lr_schedule_type)\n return lr\n\n def adjust_learning_rate(self, optimizer, epoch, batch=0, nBatch=None):\n \"\"\" adjust learning of a given optimizer and return the new learning rate \"\"\"\n new_lr = self.calc_learning_rate(epoch, batch, nBatch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n return new_lr\n\n def warmup_adjust_learning_rate(self, optimizer, T_total, nBatch, epoch, batch=0, warmup_lr=0):\n T_cur = epoch * nBatch + batch + 1\n new_lr = T_cur / T_total * (self.init_lr - warmup_lr) + warmup_lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n return new_lr\n\n \"\"\" data provider \"\"\"\n\n @property\n def data_provider(self):\n raise NotImplementedError\n\n def train_FL_loader(self, _):\n return self.data_provider.train_splits[_]\n\n @property\n def train_loader(self):\n return self.data_provider.train\n\n @property\n def valid_loader(self):\n return self.data_provider.valid\n\n @property\n def test_loader(self):\n return self.data_provider.test\n\n def random_sub_train_loader(self, n_images, batch_size, num_worker=None, num_replicas=None, rank=None, tag_FL=-1):\n return self.data_provider.build_sub_train_loader(n_images, batch_size, num_worker, num_replicas, rank, tag_FL)\n\n \"\"\" optimizer \"\"\"\n\n def 
build_optimizer(self, net_params):\n if self.no_decay_keys is not None:\n assert isinstance(net_params, list) and len(net_params) == 2\n net_params = [\n {'params': net_params[0], 'weight_decay': self.weight_decay},\n {'params': net_params[1], 'weight_decay': 0},\n ]\n else:\n net_params = [{'params': net_params, 'weight_decay': self.weight_decay}]\n\n if self.opt_type == 'sgd':\n opt_param = {} if self.opt_param is None else self.opt_param\n momentum, nesterov = opt_param.get('momentum', 0.9), opt_param.get('nesterov', True)\n optimizer = torch.optim.SGD(net_params, self.init_lr, momentum=momentum, nesterov=nesterov)\n elif self.opt_type == 'adam':\n optimizer = torch.optim.Adam(net_params, self.init_lr)\n else:\n raise NotImplementedError\n return optimizer\n\n\ndef get_net_info(net, input_shape=(3, 224, 224), measure_latency=None, print_info=True):\n net_info = {}\n if isinstance(net, nn.DataParallel):\n net = net.module\n\n # parameters\n net_info['params'] = count_parameters(net)\n\n # flops\n net_info['flops'] = count_net_flops(net, [2] + list(input_shape))/2\n\n # latencies\n latency_types = [] if measure_latency is None else measure_latency.split('#')\n for l_type in latency_types:\n latency, measured_latency = measure_net_latency(net, l_type, fast=False, input_shape=input_shape)\n net_info['%s latency' % l_type] = {\n 'val': latency,\n 'hist': measured_latency\n }\n\n if print_info:\n print(net)\n print('Total training params: %.2fM' % (net_info['params'] / 1e6))\n print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))\n for l_type in latency_types:\n print('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))\n\n return net_info\n\n\nclass RunManager:\n\n def __init__(self, path, net, run_config: RunConfig, init=True, measure_latency=None, no_gpu=False, mix_prec=None):\n self.path = path\n self.net = net\n self.run_config = run_config\n self.mix_prec = mix_prec\n\n self.best_acc = 0\n self.start_epoch = 0\n\n os.makedirs(self.path, exist_ok=True)\n\n # move network to GPU if available\n if torch.cuda.is_available() and (not no_gpu):\n self.device = torch.device('cuda:0')\n self.net = self.net.to(self.device)\n cudnn.benchmark = True\n else:\n self.device = torch.device('cpu')\n # initialize model (default)\n if init:\n self.network.init_model(run_config.model_init)\n\n # net info\n net_info = get_net_info(self.net, self.run_config.data_provider.data_shape, measure_latency, True)\n with open('%s/net_info.txt' % self.path, 'w') as fout:\n fout.write(json.dumps(net_info, indent=4) + '\\n')\n try:\n fout.write(self.network.module_str)\n except Exception:\n pass\n\n # criterion\n if isinstance(self.run_config.mixup_alpha, float):\n self.train_criterion = cross_entropy_loss_with_soft_target\n elif self.run_config.label_smoothing > 0:\n self.train_criterion = lambda pred, target: \\\n cross_entropy_with_label_smoothing(pred, target, self.run_config.label_smoothing)\n else:\n self.train_criterion = nn.CrossEntropyLoss()\n self.test_criterion = nn.CrossEntropyLoss()\n\n # optimizer\n if self.run_config.no_decay_keys:\n keys = self.run_config.no_decay_keys.split('#')\n net_params = [\n self.network.get_parameters(keys, mode='exclude'), # parameters with weight decay\n self.network.get_parameters(keys, mode='include'), # parameters without weight decay\n ]\n else:\n try:\n net_params = self.network.weight_parameters()\n except Exception:\n net_params = self.network.parameters()\n self.optimizer = self.run_config.build_optimizer(net_params)\n\n if mix_prec is 
not None:\n from apex import amp\n self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level=mix_prec)\n\n self.net = torch.nn.DataParallel(self.net)\n\n def init_FL(self, flag_reset_running_statistics):\n \"\"\" FL \"\"\"\n if self.run_config.flag_FL:\n self.nets_FL = []\n self.optimizers_FL = []\n for _ in range(self.run_config.size_FL):\n self.nets_FL.append(copy.deepcopy(self.net))\n if flag_reset_running_statistics:\n self.reset_running_statistics(self.network_FL(_), _)\n for _ in range(self.run_config.size_FL):\n if self.run_config.no_decay_keys:\n keys = self.run_config.no_decay_keys.split('#')\n net_params = [\n self.network_FL(_).get_parameters(keys, mode='exclude'), # parameters with weight decay\n self.network_FL(_).get_parameters(keys, mode='include'), # parameters without weight decay\n ]\n else:\n try:\n net_params = self.network_FL(_).weight_parameters()\n except Exception:\n net_params = self.network_FL(_).parameters()\n self.optimizers_FL.append(self.run_config.build_optimizer(net_params))\n\n \"\"\" save path and log path \"\"\"\n\n @property\n def save_path(self):\n if self.__dict__.get('_save_path', None) is None:\n save_path = os.path.join(self.path, 'checkpoint')\n os.makedirs(save_path, exist_ok=True)\n self.__dict__['_save_path'] = save_path\n return self.__dict__['_save_path']\n\n @property\n def logs_path(self):\n if self.__dict__.get('_logs_path', None) is None:\n logs_path = os.path.join(self.path, 'logs')\n os.makedirs(logs_path, exist_ok=True)\n self.__dict__['_logs_path'] = logs_path\n return self.__dict__['_logs_path']\n\n def network_FL(self, _):\n if isinstance(self.nets_FL[_], nn.DataParallel):\n return self.nets_FL[_].module\n else:\n return self.nets_FL[_]\n\n @property\n def network(self):\n if isinstance(self.net, nn.DataParallel):\n return self.net.module\n else:\n return self.net\n\n @network.setter\n def network(self, new_val):\n if isinstance(self.net, nn.DataParallel):\n self.net.module = new_val\n else:\n self.net = new_val\n\n def write_log(self, log_str, prefix='valid', should_print=True):\n \"\"\" prefix: valid, train, test \"\"\"\n if prefix in ['valid', 'test']:\n with open(os.path.join(self.logs_path, 'valid_console.txt'), 'a') as fout:\n fout.write(log_str + '\\n')\n fout.flush()\n if prefix in ['valid', 'test', 'train']:\n with open(os.path.join(self.logs_path, 'train_console.txt'), 'a') as fout:\n if prefix in ['valid', 'test']:\n fout.write('=' * 10)\n fout.write(log_str + '\\n')\n fout.flush()\n else:\n with open(os.path.join(self.logs_path, '%s.txt' % prefix), 'a') as fout:\n fout.write(log_str + '\\n')\n fout.flush()\n if should_print:\n print(log_str)\n\n \"\"\" save and load models \"\"\"\n\n def save_model(self, checkpoint=None, is_best=False, model_name=None):\n if checkpoint is None:\n checkpoint = {'state_dict': self.network.state_dict()}\n\n if model_name is None:\n model_name = 'checkpoint.pth.tar'\n\n if self.mix_prec is not None:\n from apex import amp\n checkpoint['amp'] = amp.state_dict()\n\n checkpoint['dataset'] = self.run_config.dataset # add `dataset` info to the checkpoint\n latest_fname = os.path.join(self.save_path, 'latest.txt')\n model_path = os.path.join(self.save_path, model_name)\n with open(latest_fname, 'w') as fout:\n fout.write(model_path + '\\n')\n torch.save(checkpoint, model_path)\n\n if is_best:\n best_path = os.path.join(self.save_path, 'model_best.pth.tar')\n torch.save({'state_dict': checkpoint['state_dict']}, best_path)\n\n def load_model(self, model_fname=None):\n 
latest_fname = os.path.join(self.save_path, 'latest.txt')\n if model_fname is None and os.path.exists(latest_fname):\n with open(latest_fname, 'r') as fin:\n model_fname = fin.readline()\n if model_fname[-1] == '\\n':\n model_fname = model_fname[:-1]\n try:\n if model_fname is None or not os.path.exists(model_fname):\n model_fname = '%s/checkpoint.pth.tar' % self.save_path\n with open(latest_fname, 'w') as fout:\n fout.write(model_fname + '\\n')\n print(\"=> loading checkpoint '{}'\".format(model_fname))\n\n if torch.cuda.is_available():\n checkpoint = torch.load(model_fname)\n else:\n checkpoint = torch.load(model_fname, map_location='cpu')\n\n self.network.load_state_dict(checkpoint['state_dict'])\n\n if 'epoch' in checkpoint:\n self.start_epoch = checkpoint['epoch'] + 1\n if 'best_acc' in checkpoint:\n self.best_acc = checkpoint['best_acc']\n if 'optimizer' in checkpoint:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n if self.mix_prec is not None and 'amp' in checkpoint:\n from apex import amp\n amp.load_state_dict(checkpoint['amp'])\n\n print(\"=> loaded checkpoint '{}'\".format(model_fname))\n except Exception:\n print('fail to load checkpoint from %s' % self.save_path)\n\n def save_config(self):\n \"\"\" dump run_config and net_config to the model_folder \"\"\"\n net_save_path = os.path.join(self.path, 'net.config')\n json.dump(self.network.config, open(net_save_path, 'w'), indent=4)\n print('Network configs dump to %s' % net_save_path)\n\n run_save_path = os.path.join(self.path, 'run.config')\n json.dump(self.run_config.config, open(run_save_path, 'w'), indent=4)\n print('Run configs dump to %s' % run_save_path)\n\n \"\"\" train and test \"\"\"\n\n def validate(self, epoch=0, is_test=True, run_str='', net=None, data_loader=None, no_logs=False):\n if net is None:\n net = self.net\n if not isinstance(net, nn.DataParallel):\n net = nn.DataParallel(net)\n\n if data_loader is None:\n if is_test:\n data_loader = self.run_config.test_loader\n else:\n data_loader = self.run_config.valid_loader\n\n net.eval()\n\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n with torch.no_grad():\n with tqdm(total=len(data_loader),\n desc='Validate Epoch #{} {}'.format(epoch + 1, run_str), disable=no_logs) as t:\n for i, (images, labels) in enumerate(data_loader):\n images, labels = images.to(self.device), labels.to(self.device)\n # compute output\n output = net(images)\n loss = self.test_criterion(output, labels.long())\n # measure accuracy and record loss\n acc1 = accuracy(output, labels, topk=(1,))\n\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0].item(), images.size(0))\n t.set_postfix({\n 'loss': losses.avg,\n 'top1': top1.avg,\n 'img_size': images.size(2),\n })\n t.update(1)\n return losses.avg, top1.avg\n\n def validate_all_resolution(self, epoch=0, is_test=True, net=None):\n if net is None:\n net = self.network\n if isinstance(self.run_config.data_provider.image_size, list):\n img_size_list, loss_list, top1_list, top5_list = [], [], [], []\n for img_size in self.run_config.data_provider.image_size:\n img_size_list.append(img_size)\n self.run_config.data_provider.assign_active_img_size(img_size)\n if not self.run_config.flag_FL:\n self.reset_running_statistics(net=net)\n else:\n self.reset_running_statistics(net=None, tag_FL=self.run_config.size_FL)\n loss, top1 = self.validate(epoch, is_test, net=net)\n loss_list.append(loss)\n top1_list.append(top1)\n return img_size_list, loss_list, top1_list\n else:\n loss, top1 = self.validate(epoch, 
is_test, net=net)\n return [self.run_config.data_provider.active_img_size], [loss], [top1]\n\n def train_one_epoch(self, args, epoch, warmup_epochs=0, warmup_lr=0, tag_FL=-1):\n # switch to train mode\n if tag_FL >= 0:\n self.nets_FL[tag_FL].train()\n else:\n self.net.train()\n\n if tag_FL >= 0:\n data_loader = self.run_config.train_FL_loader(tag_FL)\n else:\n data_loader = self.run_config.train_loader\n nBatch = len(data_loader)\n\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n data_time = AverageMeter()\n\n with tqdm(total=nBatch,\n desc='Train Epoch #{}'.format(epoch + 1)) as t:\n end = time.time()\n for i, (images, labels) in enumerate(data_loader):\n data_time.update(time.time() - end)\n if tag_FL >= 0:\n if epoch < warmup_epochs:\n new_lr = self.run_config.warmup_adjust_learning_rate(\n self.optimizers_FL[tag_FL], warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,\n )\n else:\n new_lr = self.run_config.adjust_learning_rate(self.optimizers_FL[tag_FL], epoch - warmup_epochs, i, nBatch)\n else:\n if epoch < warmup_epochs:\n new_lr = self.run_config.warmup_adjust_learning_rate(\n self.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,\n )\n else:\n new_lr = self.run_config.adjust_learning_rate(self.optimizer, epoch - warmup_epochs, i, nBatch)\n\n images, labels = images.to(self.device), labels.to(self.device)\n target = labels\n\n # soft target\n if args.teacher_model is not None:\n args.teacher_model.train()\n with torch.no_grad():\n soft_logits = args.teacher_model(images).detach()\n soft_label = F.softmax(soft_logits, dim=1)\n\n # compute output\n if isinstance(self.network, torchvision.models.Inception3):\n if tag_FL >= 0:\n output, aux_outputs = self.nets_FL[tag_FL](images)\n else:\n output, aux_outputs = self.net(images)\n loss1 = self.train_criterion(output, labels.long())\n loss2 = self.train_criterion(aux_outputs, labels.long())\n loss = loss1 + 0.4 * loss2\n else:\n if tag_FL >= 0:\n output = self.nets_FL[tag_FL](images)\n else:\n output = self.net(images)\n loss = self.train_criterion(output, labels.long())\n\n if args.teacher_model is None:\n loss_type = 'ce'\n else:\n if args.kd_type == 'ce':\n kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)\n else:\n kd_loss = F.mse_loss(output, soft_logits)\n loss = args.kd_ratio * kd_loss + loss\n loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type)\n\n # compute gradient and do SGD step\n if tag_FL >= 0:\n self.nets_FL[tag_FL].zero_grad() # or self.optimizer.zero_grad()\n else:\n self.net.zero_grad() # or self.optimizer.zero_grad()\n if self.mix_prec is not None:\n from apex import amp\n if tag_FL >= 0:\n with amp.scale_loss(loss, self.optimizers_FL[tag_FL]) as scaled_loss:\n scaled_loss.backward()\n else:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n if tag_FL >= 0:\n self.optimizers_FL[tag_FL].step()\n else:\n self.optimizer.step()\n\n # measure accuracy and record loss\n acc1 = accuracy(output, target, topk=(1,))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0].item(), images.size(0))\n\n t.set_postfix({\n 'loss': losses.avg,\n 'top1': top1.avg,\n 'img_size': images.size(2),\n 'lr': new_lr,\n 'loss_type': loss_type,\n 'data_time': data_time.avg,\n })\n t.update(1)\n end = time.time()\n return losses.avg, top1.avg\n\n def FedAvg(self):\n if self.run_config.flag_FL:\n with torch.no_grad():\n base_state = self.network.state_dict()\n all_states = []\n for _ in 
range(self.run_config.size_FL):\n model = self.network_FL(_)\n all_states.append(model.state_dict())\n for name in base_state:\n for _ in range(self.run_config.size_FL):\n # print(all_states[_][name].shape)\n # print(all_states[_][name])\n tmp_state = (all_states[_][name] / self.run_config.size_FL) if _ == 0 else \\\n tmp_state + (all_states[_][name] / self.run_config.size_FL)\n base_state[name].copy_(tmp_state)\n self.network.load_state_dict(base_state)\n for _ in range(self.run_config.size_FL):\n self.network_FL(_).load_state_dict(base_state)\n\n def train(self, args, warmup_epoch=0, warmup_lr=0, flag_reset_running_statistics=False):\n self.init_FL(flag_reset_running_statistics)\n\n for epoch in range(self.start_epoch, self.run_config.n_epochs + warmup_epoch):\n if not self.run_config.flag_FL:\n train_loss, train_top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr)\n else:\n train_loss, train_top1 = [], []\n for _ in range(self.run_config.size_FL):\n loss, top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr, _)\n train_loss.append(loss)\n train_top1.append(top1)\n train_loss = np.mean(train_loss)\n train_top1 = np.mean(train_top1)\n self.FedAvg()\n\n if (epoch + 1) % self.run_config.validation_frequency == 0:\n img_size, val_loss, val_acc = self.validate_all_resolution(epoch=epoch, is_test=False)\n\n is_best = np.mean(val_acc) > self.best_acc\n self.best_acc = max(self.best_acc, np.mean(val_acc))\n val_log = 'Valid [{0}/{1}]\\tloss {2:.3f}\\ttop-1 acc {3:.3f} ({4:.3f})'. \\\n format(epoch + 1 - warmup_epoch, self.run_config.n_epochs,\n np.mean(val_loss), np.mean(val_acc), self.best_acc)\n val_log += '\\tTrain top-1 {top1:.3f}\\tloss {train_loss:.3f}\\t'. \\\n format(top1=train_top1, train_loss=train_loss)\n for i_s, v_a in zip(img_size, val_acc):\n val_log += '(%d, %.3f), ' % (i_s, v_a)\n self.write_log(val_log, prefix='valid', should_print=False)\n else:\n is_best = False\n\n self.save_model({\n 'epoch': epoch,\n 'best_acc': self.best_acc,\n 'optimizer': self.optimizer.state_dict(),\n 'state_dict': self.network.state_dict(),\n }, is_best=is_best)\n\n return self.network\n\n def reset_running_statistics(self, net=None, tag_FL=-1):\n from ofa.elastic_nn.utils import set_running_statistics\n if tag_FL == -1:\n if net is None:\n net = self.network\n sub_train_loader = self.run_config.random_sub_train_loader(2000, 100)\n set_running_statistics(net, sub_train_loader)\n elif tag_FL == self.run_config.size_FL:\n if not self.run_config.flag_FL:\n print('Wrong FL client ID')\n import sys\n sys.exit()\n for _ in range(tag_FL):\n self.reset_running_statistics(self.network_FL(_), _)\n self.FedAvg()\n else:\n if tag_FL < 0 or tag_FL >= self.run_config.size_FL or not self.run_config.flag_FL:\n print('Wrong FL client ID')\n import sys\n sys.exit()\n if net is None:\n net = self.network_FL(tag_FL)\n sub_train_loader = self.run_config.random_sub_train_loader(2000, 100, tag_FL=tag_FL)\n set_running_statistics(net, sub_train_loader)\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"numpy.mean"
]
] |
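The FedAvg() method in the row above averages every state_dict entry across clients with equal weight and broadcasts the result back to the server model and each client. A minimal standalone sketch of that scheme, assuming uniformly weighted clients and float-typed parameters (integer buffers such as BatchNorm's num_batches_tracked would need separate handling); fed_avg and the toy Linear models are illustrative names, not part of the repository:

import copy
import torch
import torch.nn as nn

def fed_avg(global_model: nn.Module, client_models) -> None:
    # Average client parameters uniformly, mirroring the tmp_state
    # accumulation in FedAvg() above, then broadcast the result back.
    with torch.no_grad():
        avg_state = copy.deepcopy(global_model.state_dict())
        for name in avg_state:
            stacked = torch.stack([cm.state_dict()[name] for cm in client_models])
            avg_state[name].copy_(stacked.mean(dim=0))
        global_model.load_state_dict(avg_state)
        for cm in client_models:
            cm.load_state_dict(avg_state)

clients = [nn.Linear(4, 2) for _ in range(3)]
server = nn.Linear(4, 2)
fed_avg(server, clients)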
crsqq/OpenNE | [
"0cecb2b5076b878d2f07ed1130682aeab6ce37f1"
] | [
"wrapper.py"
] | [
"from OpenNE.src.libnrl import graph\nfrom OpenNE.src.libnrl import grarep\nfrom OpenNE.src.libnrl import line\nfrom OpenNE.src.libnrl import node2vec\nfrom OpenNE.src.libnrl.gcn import gcnAPI\nfrom itertools import product\nimport networkx as nx\nimport numpy as np\nimport tensorflow as tf\n\ndef nx_to_openne_graph(nxgraph, stringify_nodes=True):\n dg = nx.to_directed(nxgraph).copy()\n if stringify_nodes:\n nx.relabel_nodes(dg, {n:str(n) for n in dg.nodes}, copy=False)\n nx.set_edge_attributes(dg, 1.0, 'weight')\n g = graph.Graph()\n g.G = dg\n g.encode_node()\n return g\n\n\nclass OpenNEEmbeddingBase:\n def __init__(self, thisgraph, parameters):\n self.graph = nx_to_openne_graph(thisgraph)\n self.embeddings = None\n self.parameters = parameters\n def run(self):\n raise NotImplementedError('')\n def update_parameters(self, new_parameters):\n self.parameters = new_parameters\n self.embeddings = None\n def get_embeddings(self):\n if not self.embeddings:\n self.run()\n return self.embeddings\n def get_vectors(self):\n return self.get_embeddings().vectors\n\n @staticmethod\n def valid_parameter_combinations(parameterSpace):\n \"\"\"\n returns all possible combinations, if some are not valid / useful,\n this method needs to be overwritten\n \"\"\"\n all_combinations = product(*parameterSpace.values())\n return [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]\n\nclass Node2VecEmbedding(OpenNEEmbeddingBase):\n \"\"\"\n {'dim': 2, 'num_paths': 80, 'p': 1, 'path_length': 10, 'q': 1}\n \"\"\"\n def run(self):\n self.embeddings = node2vec.Node2vec(self.graph, retrainable=True, **self.parameters)\n\n def retrain(self, new_graph, num_paths=80, epochs=5):\n g = nx_to_openne_graph(new_graph)\n self.embeddings.retrain(g, num_paths=num_paths, epochs=epochs)\n\nclass GraRepEmbedding(OpenNEEmbeddingBase):\n def run(self):\n self.embeddings = grarep.GraRep(self.graph, **self.parameters)\n\n @staticmethod\n def valid_parameter_combinations(parameterSpace):\n \"\"\"\n returns all possible combinations, if some are not valid / useful,\n this method needs to be overwritten\n \"\"\"\n all_combinations = product(*parameterSpace.values())\n all_combinations = [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]\n return [x for x in all_combinations if x[\"dim\"] % x[\"Kstep\"] == 0]\n\nclass LINEEmbedding(OpenNEEmbeddingBase):\n def run(self):\n tf.reset_default_graph()\n self.embeddings = line.LINE(self.graph, **self.parameters)\n\n\n\nfrom scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh\n\nclass SpectralClusteringEmbedding(OpenNEEmbeddingBase):\n def __init__(self, thisgraph, parameters):\n self.graph = thisgraph\n self.embeddings = None\n self.parameters = parameters\n\n nx.relabel_nodes(self.graph, {n:str(n) for n in self.graph.nodes}, copy=False)\n def run(self):\n L = nx.normalized_laplacian_matrix(self.graph)\n evalues, evectors = a,b = largest_eigsh(L, k=self.parameters['dim'])\n self.embeddings = {str(n):v for n,v in zip(self.graph.nodes, evectors)}\n def get_vectors(self):\n return self.get_embeddings()\n\n\ndef _RandNE(graph, dim, q, beta):\n d = dim\n A = nx.to_scipy_sparse_matrix(graph)\n\n R = np.random.normal(loc=0, scale=1/d, size=(A.shape[0], d))\n\n U0, _ = np.linalg.qr(R)\n\n Ulist = [U0]\n for i in range(q):\n Ulist.append(A.dot(Ulist[-1]))\n\n Ulist = np.array(Ulist)\n\n betas = (beta**np.arange(0, q+1))\n\n U = np.array([scalar*m for scalar,m in zip(betas, Ulist)]).sum(axis=0)\n return U\n\nclass 
RandNEEmbedding(OpenNEEmbeddingBase):\n def __init__(self, thisgraph, parameters):\n self.graph = thisgraph\n self.embeddings = None\n self.parameters = parameters\n def run(self):\n U = _RandNE(self.graph, **self.parameters)\n self.embeddings = {str(n):v for n,v in zip(self.graph.nodes, U)}\n def get_vectors(self):\n return self.get_embeddings()\n"
] | [
[
"numpy.linalg.qr",
"numpy.arange",
"scipy.sparse.linalg.eigen.arpack.eigsh",
"numpy.random.normal",
"tensorflow.reset_default_graph",
"numpy.array"
]
] |
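The _RandNE helper in the row above builds embeddings by propagating a QR-orthogonalised Gaussian projection over the adjacency matrix and summing the hops with geometric weights beta**i. A minimal sketch of the same iteration, assuming a networkx version that provides to_scipy_sparse_array (the repository itself calls the older to_scipy_sparse_matrix); randne and the fixed seed are illustrative:

import networkx as nx
import numpy as np

def randne(graph: nx.Graph, dim: int = 4, q: int = 2, beta: float = 0.9) -> np.ndarray:
    A = nx.to_scipy_sparse_array(graph)            # sparse adjacency, shape (n, n)
    rng = np.random.default_rng(0)
    R = rng.normal(loc=0.0, scale=1.0 / dim, size=(A.shape[0], dim))
    U, _ = np.linalg.qr(R)                         # orthonormal base projection
    blocks, cur = [U], U
    for _ in range(q):
        cur = A.dot(cur)                           # propagate one hop further
        blocks.append(cur)
    weights = beta ** np.arange(q + 1)             # geometric decay per hop
    return sum(w * b for w, b in zip(weights, blocks))

emb = randne(nx.karate_club_graph(), dim=4)        # (34, 4) embedding matrix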
thibnoel/solopython | [
"0977c6de8bcee2b8ecabaa46f6953e7a05334af1"
] | [
"minimal_controler.py"
] | [
"# coding: utf8\nfrom coll_avoidance_modules.solo_coll_wrapper_c import *\nfrom coll_avoidance_modules.collisions_controller import *\n\nfrom PA_utils_mpc import PyBulletSimulator\nimport numpy as np\nimport argparse\n# from solo12 import Solo12\n# from pynput import keyboard\n\nfrom PA_logger import Logger\n# from utils.qualisysClient import QualisysClient\n\nimport os\nimport sys\nsys.path.insert(0, './mpctsid')\n\nDT = 0.002\n\nkey_pressed = False\n\n\ndef on_press(key):\n \"\"\"Wait for a specific key press on the keyboard\n\n Args:\n key (keyboard.Key): the key we want to wait for\n \"\"\"\n global key_pressed\n try:\n if key == keyboard.Key.enter:\n key_pressed = True\n # Stop listener\n return False\n except AttributeError:\n print('Unknown key {0} pressed'.format(key))\n\n\ndef put_on_the_floor(device, q_init):\n \"\"\"Make the robot go to the default initial position and wait for the user\n to press the Enter key to start the main control loop\n\n Args:\n device (robot wrapper): a wrapper to communicate with the robot\n q_init (array): the default position of the robot\n \"\"\"\n global key_pressed\n key_pressed = False\n Kp_pos = 3.\n Kd_pos = 0.01\n imax = 3.0\n pos = np.zeros(device.nb_motors)\n for motor in range(device.nb_motors):\n pos[motor] = q_init[device.motorToUrdf[motor]] * \\\n device.gearRatioSigned[motor]\n listener = keyboard.Listener(on_press=on_press)\n listener.start()\n print(\"Put the robot on the floor and press Enter\")\n while not key_pressed:\n device.UpdateMeasurment()\n for motor in range(device.nb_motors):\n ref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() -\n Kd_pos*device.hardware.GetMotor(motor).GetVelocity())\n ref = min(imax, max(-imax, ref))\n device.hardware.GetMotor(motor).SetCurrentReference(ref)\n device.SendCommand(WaitEndOfCycle=True)\n\n print(\"Start the motion.\")\n\n\ndef mcapi_playback(name_interface, clib_path):\n \"\"\"Main function that calibrates the robot, get it into a default waiting position then launch\n the main control loop once the user has pressed the Enter key\n\n Args:\n name_interface (string): name of the interface that is used to communicate with the robot\n \"\"\"\n\n #########################################\n # PARAMETERS OF THE MPC-TSID CONTROLLER #\n #########################################\n \n envID = 0 # Identifier of the environment to choose in which one the simulation will happen\n velID = 0 # Identifier of the reference velocity profile to choose which one will be sent to the robot\n\n dt_tsid = 0.0020 # Time step of TSID\n dt_mpc = 0.02 # Time step of the MPC\n # dt is dt_tsid, defined in the TSID controller script\n k_mpc = int(dt_mpc / dt_tsid)\n t = 0.0 # Time\n n_periods = 1 # Number of periods in the prediction horizon\n T_gait = 0.64 # Duration of one gait period\n N_SIMULATION = 20000 # number of simulated TSID time steps\n\t\n # If True the ground is flat, otherwise it has bumps\n use_flat_plane = True\n\n # Enable or disable PyBullet GUI\n enable_pyb_GUI = True\n\t\n # Default position after calibration\n #q_init = np.array([0.0, 0.8, -1.6, 0, 0.8, -1.6,\n # 0, -0.8, 1.6, 0, -0.8, 1.6])\n\t\n q_init = [0,0,0,0,0,0,0,0]\n\t#############################################\n # PARAMETERS OF THE COLL. AVOID. 
CONTROLLER #\n #############################################\n\t### Set collision avoidance parameters\n collision_threshold = 0.1\n collision_kp = 10.\n collision_kv = 0.01\n k_friction = 0.1\n\t# Load the specified compiled C library\n cCollFun = CDLL(clib_path)\n\n emergency_dist_thresh = collision_threshold/5\n emergency_tau_thresh = 3\n\n emergencyFlag = False\n\n ####\n\n # Create device object\n # device = Solo12(name_interface, dt=DT)\n device = PyBulletSimulator()\n\n # qc = QualisysClient(ip=\"140.93.16.160\", body_id=0) # QualisysClient\n # logger = Logger(device, qualisys=qc) # Logger object\n nb_motors = device.nb_motors\n\n # Calibrate encoders\n #device.Init(calibrateEncoders=True, q_init=q_init)\n device.Init(calibrateEncoders=True, q_init=q_init, envID=envID,\n use_flat_plane=use_flat_plane, enable_pyb_GUI=enable_pyb_GUI, dt=dt_tsid)\n\n # Wait for Enter input before starting the control loop\n # put_on_the_floor(device, q_init)\n\n # CONTROL LOOP ***************************************************\n t = 0.0\n t_max = (N_SIMULATION-2) * dt_tsid\n while ((not device.hardware.IsTimeout()) and (t < t_max)):\n\n device.UpdateMeasurment() # Retrieve data from IMU and Motion capture\n\n # Desired torques\n tau_q = np.zeros(12)\n\n\t\t#tau_q = np.zeros(len(nb_motors))\n\t\t#Check if the controller switched to emergency mode\n if(emergencyFlag):\n # Compute emergency behavior\n # Ex :\n tau_q = computeEmergencyTorque(device.v_mes, collision_kv)\n else:\n\t\t\t# Compute collisions distances and jacobians from the C lib. \n c_results = getLegsCollisionsResults(device.q_mes, cCollFun, nb_motors, 20)\n c_dist_legs = getLegsDistances(c_results, nb_motors, 20)\n c_Jlegs = getLegsJacobians(c_results, nb_motors, 20)\n\t\t\t# Compute collision avoidance torque\n tau_q = computeRepulsiveTorque(device.q_mes, device.v_mes, c_dist_legs, c_Jlegs, dist_thresh=collision_threshold, kp=collision_kp, kv=collision_kv)\n \n\t\t# Set a virtual friction torque to avoid divergence\n tau_q += -k_friction*device.v_mes\n # Set desired torques for the actuators\n device.SetDesiredJointTorque(tau_q)\n\n # Call logger\n # logger.sample(device, qualisys=qc)\n\n # Send command to the robot\n device.SendCommand(WaitEndOfCycle=True)\n if ((device.cpt % 1000) == 0):\n device.Print()\n\n t += DT\n\n # ****************************************************************\n\n # Whatever happened we send 0 torques to the motors.\n device.SetDesiredJointTorque([0]*nb_motors)\n device.SendCommand(WaitEndOfCycle=True)\n\n if device.hardware.IsTimeout():\n print(\"Masterboard timeout detected.\")\n print(\"Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.\")\n # Shut down the interface between the computer and the master board\n device.hardware.Stop()\n\n # Save the logs of the Logger object\n # logger.saveAll()\n\n\ndef main():\n \"\"\"Main function\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description='Playback trajectory to show the extent of solo12 workspace.')\n parser.add_argument('-i',\n '--interface',\n required=True,\n help='Name of the interface (use ifconfig in a terminal), for instance \"enp1s0\"')\n\n parser.add_argument('-C',\n '--clib',\n required=True,\n help='Path to the compiled C-generated library used for distance and jacobian evaluations, for instance \"libcoll_legs8.so\"')\n\n #example_script(parser.parse_args().interface, parser.parse_args().clib)\n\n mcapi_playback(parser.parse_args().interface, parser.parse_args().clib)\n\n\nif __name__ == 
\"__main__\":\n main()\n"
] | [
[
"numpy.zeros"
]
] |
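put_on_the_floor() in the row above holds each motor at its reference position with a clamped PD current command while waiting for the Enter key. A sketch of that idea as a textbook PD law on vectorised joint states; note the on-robot loop folds the damping term inside the proportional bracket, so the gains do not transfer one-to-one, and pd_hold is an illustrative name:

import numpy as np

Kp, Kd, i_max = 3.0, 0.01, 3.0   # gains and current clamp from the script

def pd_hold(q_ref: np.ndarray, q: np.ndarray, v: np.ndarray) -> np.ndarray:
    cmd = Kp * (q_ref - q) - Kd * v        # proportional-derivative law
    return np.clip(cmd, -i_max, i_max)     # saturate, like min(imax, max(-imax, ref))

cmd = pd_hold(q_ref=np.zeros(12), q=np.full(12, 0.1), v=np.zeros(12))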
ratt-ru/dask-ms | [
"becd3572f86a0ad78b55540f25fce6e129976a29"
] | [
"daskms/tests/test_ordering.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport dask\nimport dask.array as da\nfrom numpy.testing import assert_array_equal\nimport pyrap.tables as pt\nimport pytest\n\nfrom daskms.table_proxy import TableProxy\nfrom daskms.ordering import (ordering_taql,\n row_ordering,\n group_ordering_taql,\n group_row_ordering)\nfrom daskms.utils import group_cols_str, index_cols_str, assert_liveness\n\n\ndef table_proxy(ms):\n return TableProxy(pt.table, ms, ack=False,\n lockoptions='user', readonly=True)\n\n\[email protected](\"group_cols\", [\n [\"FIELD_ID\", \"SCAN_NUMBER\"]],\n ids=group_cols_str)\[email protected](\"index_cols\", [\n [\"TIME\", \"ANTENNA1\", \"ANTENNA2\"]],\n ids=index_cols_str)\ndef test_ordering_query_taql_where_strings(ms, group_cols, index_cols):\n taql = group_ordering_taql(table_proxy(ms), group_cols, index_cols,\n taql_where=\"ANTENNA1 != ANTENNA2\")\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER,\\n\"\n \" GAGGR(TIME) as GROUP_TIME,\\n\"\n \" GAGGR(ANTENNA1) as GROUP_ANTENNA1,\\n\"\n \" GAGGR(ANTENNA2) as GROUP_ANTENNA2,\\n\"\n \" GROWID() AS __tablerow__,\\n\"\n \" GCOUNT() as __tablerows__,\\n\"\n \" GROWID()[0] as __firstrow__\\n\"\n \"FROM\\n\"\n \" $1\\n\"\n \"WHERE\\n\"\n \" ANTENNA1 != ANTENNA2\\n\"\n \"GROUPBY\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER\")\n\n taql = group_ordering_taql(table_proxy(ms), group_cols, index_cols)\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER,\\n\"\n \" GAGGR(TIME) as GROUP_TIME,\\n\"\n \" GAGGR(ANTENNA1) as GROUP_ANTENNA1,\\n\"\n \" GAGGR(ANTENNA2) as GROUP_ANTENNA2,\\n\"\n \" GROWID() AS __tablerow__,\\n\"\n \" GCOUNT() as __tablerows__,\\n\"\n \" GROWID()[0] as __firstrow__\\n\"\n \"FROM\\n\"\n \" $1\\n\"\n \"GROUPBY\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER\")\n\n taql = group_ordering_taql(table_proxy(ms), group_cols, [])\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER,\\n\"\n \" GROWID() AS __tablerow__,\\n\"\n \" GCOUNT() as __tablerows__,\\n\"\n \" GROWID()[0] as __firstrow__\\n\"\n \"FROM\\n\"\n \" $1\\n\"\n \"GROUPBY\\n\"\n \" FIELD_ID,\\n\"\n \" SCAN_NUMBER\")\n\n taql = ordering_taql(table_proxy(ms), index_cols,\n taql_where=\"ANTENNA1 != ANTENNA2\")\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" ROWID() as __tablerow__\\n\"\n \"FROM\\n\"\n \" $1\\n\"\n \"WHERE\\n\"\n \" ANTENNA1 != ANTENNA2\\n\"\n \"ORDERBY\\n\"\n \" TIME,\\n\"\n \" ANTENNA1,\\n\"\n \" ANTENNA2\")\n\n taql = ordering_taql(table_proxy(ms), index_cols)\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" ROWID() as __tablerow__\\n\"\n \"FROM\\n\"\n \" $1\\n\"\n \"ORDERBY\\n\"\n \" TIME,\\n\"\n \" ANTENNA1,\\n\"\n \" ANTENNA2\")\n\n taql = ordering_taql(table_proxy(ms), [])\n assert taql._args[0].replace(\"\\t\", \" \"*4) == (\n \"SELECT\\n\"\n \" ROWID() as __tablerow__\\n\"\n \"FROM\\n\"\n \" $1\\n\")\n\n\[email protected](\"group_cols\", [\n [\"FIELD_ID\", \"SCAN_NUMBER\"]],\n ids=group_cols_str)\[email protected](\"index_cols\", [\n [\"TIME\", \"ANTENNA1\", \"ANTENNA2\"]],\n ids=index_cols_str)\ndef test_ordering_multiple_groups(ms, group_cols, index_cols):\n group_taql = group_ordering_taql(table_proxy(ms), group_cols, index_cols)\n assert_liveness(2, 1)\n orders = group_row_ordering(group_taql, group_cols,\n index_cols, [{'row': 2}])\n assert_liveness(2, 1)\n first_rows = group_taql.getcol(\"__firstrow__\").result()\n assert_liveness(2, 
1)\n\n assert len(first_rows) == len(orders) == 6\n\n assert_array_equal(first_rows, [0, 1, 3, 4, 7, 8])\n\n rowid_arrays = tuple(o[0] for o in orders)\n rowids = dask.compute(rowid_arrays)[0]\n\n assert_array_equal(rowids[0], [2, 0])\n assert_array_equal(rowids[1], [1])\n assert_array_equal(rowids[2], [5, 3])\n assert_array_equal(rowids[3], [6, 4])\n assert_array_equal(rowids[4], [9, 7])\n assert_array_equal(rowids[5], [8])\n\n del first_rows, orders, rowid_arrays, group_taql\n assert_liveness(0, 0)\n\n\[email protected](\"index_cols\", [\n [\"TIME\", \"ANTENNA1\", \"ANTENNA2\"]],\n ids=index_cols_str)\[email protected](\"chunks\", [\n {'row': 2},\n {'row': (2, 3, 4, 1)},\n {'row': (5, 3, 2)}],\n ids=lambda c: f'chunks={c}')\ndef test_row_ordering_no_group(ms, index_cols, chunks):\n order_taql = ordering_taql(table_proxy(ms), index_cols)\n assert_liveness(2, 1)\n orders = row_ordering(order_taql, index_cols, chunks)\n assert_liveness(2, 1)\n\n # Normalise chunks to match that of the output array\n expected_chunks = da.core.normalize_chunks(chunks['row'], (10,))\n\n assert orders[0].chunks == expected_chunks\n\n rowids = dask.compute(orders[0])[0]\n assert_array_equal(rowids, [9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n\n del orders, order_taql\n assert_liveness(0, 0)\n\n\n# Grouping on DATA_DESC_ID gives us two groups\n# one with 7 rows and the other with 3\[email protected](\"group_cols\", [\n [\"DATA_DESC_ID\"]],\n ids=group_cols_str)\[email protected](\"index_cols\", [\n [\"TIME\", \"ANTENNA1\", \"ANTENNA2\"]],\n ids=index_cols_str)\[email protected](\"chunks\", [\n [{'row': 2}, {'row': 2}],\n [{'row': (3, 4)}, {'row': 3}],\n [{'row': (2, 3, 2)}, {'row': (2, 1)}],\n [{'row': 2}]],\n ids=lambda c: f'chunks={c}')\ndef test_row_ordering_multiple_groups(ms, group_cols,\n index_cols, chunks):\n group_taql = group_ordering_taql(table_proxy(ms), group_cols, index_cols)\n assert_liveness(2, 1)\n orders = group_row_ordering(group_taql, group_cols, index_cols, chunks)\n assert_liveness(2, 1)\n first_rows = group_taql.getcol(\"__firstrow__\").result()\n assert_liveness(2, 1)\n\n # We get two groups out\n assert len(orders) == len(first_rows) == 2\n assert_array_equal(first_rows, [0, 7])\n\n rowid_arrays = tuple(o[0] for o in orders)\n rowids = dask.compute(rowid_arrays)[0]\n\n # Check the two resulting groups\n\n # Normalise chunks to match that of the output array\n row_chunks = chunks[0]['row']\n expected_chunks = da.core.normalize_chunks(row_chunks, (7,))\n assert_array_equal(rowids[0], [6, 5, 4, 3, 2, 1, 0])\n assert rowid_arrays[0].chunks == expected_chunks\n\n # If chunks only supplied for the first group, re-use it's chunking\n row_chunks = chunks[0]['row'] if len(chunks) == 1 else chunks[1]['row']\n expected_chunks = da.core.normalize_chunks(row_chunks, (3,))\n assert_array_equal(rowids[1], [9, 8, 7])\n assert rowid_arrays[1].chunks == expected_chunks\n\n del first_rows, orders, rowid_arrays, group_taql\n assert_liveness(0, 0)\n"
] | [
[
"numpy.testing.assert_array_equal"
]
] |
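The chunk assertions in the row above rely on dask normalising a chunk specification against the total row count, so an integer and an explicit tuple both map to a concrete per-chunk tuple. A small self-contained check of that behaviour with da.core.normalize_chunks, plus a round-trip through compute:

import dask.array as da
from numpy.testing import assert_array_equal

# An integer chunk spec is expanded; an explicit tuple is kept as-is.
assert da.core.normalize_chunks(2, (10,)) == ((2, 2, 2, 2, 2),)
assert da.core.normalize_chunks((5, 3, 2), (10,)) == ((5, 3, 2),)

# The same rows, chunked, round-trip through compute unchanged.
x = da.arange(10, chunks=2)
assert_array_equal(x.compute(), range(10))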
TB5zhh/ViewpointBottleneck | [
"db0fe4b61ae42eceff21296844200d636e6e5e83"
] | [
"lib/distributed_utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport pickle\nimport socket\nimport struct\nimport subprocess\nimport warnings\n\nimport torch\nimport torch.distributed as dist\n\n\n\ndef is_master(args):\n return args.distributed_rank == 0\n\n\ndef infer_init_method(args):\n if args.distributed_init_method is not None:\n return\n\n # support torch.distributed.launch\n if all(key in os.environ for key in [\n 'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'\n ]):\n args.distributed_init_method = 'env://'\n args.distributed_world_size = int(os.environ['WORLD_SIZE'])\n args.distributed_rank = int(os.environ['RANK'])\n\n # we can determine the init method automatically for Slurm\n elif args.distributed_port > 0:\n node_list = os.environ.get('SLURM_STEP_NODELIST')\n if node_list is None:\n node_list = os.environ.get('SLURM_JOB_NODELIST')\n if node_list is not None:\n try:\n hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])\n args.distributed_init_method = 'tcp://{host}:{port}'.format(\n host=hostnames.split()[0].decode('utf-8'),\n port=args.distributed_port,\n )\n nnodes = int(os.environ.get('SLURM_NNODES'))\n ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')\n if ntasks_per_node is not None:\n ntasks_per_node = int(ntasks_per_node)\n else:\n ntasks = int(os.environ.get('SLURM_NTASKS'))\n nnodes = int(os.environ.get('SLURM_NNODES'))\n assert ntasks % nnodes == 0\n ntasks_per_node = int(ntasks / nnodes)\n if ntasks_per_node == 1:\n assert args.distributed_world_size % nnodes == 0\n gpus_per_node = args.distributed_world_size // nnodes\n node_id = int(os.environ.get('SLURM_NODEID'))\n args.distributed_rank = node_id * gpus_per_node\n else:\n assert ntasks_per_node == args.distributed_world_size // nnodes\n args.distributed_no_spawn = True\n args.distributed_rank = int(os.environ.get('SLURM_PROCID'))\n args.device_id = int(os.environ.get('SLURM_LOCALID'))\n except subprocess.CalledProcessError as e: # scontrol failed\n raise e\n except FileNotFoundError: # Slurm is not installed\n pass\n\n\ndef distributed_init(args):\n if args.distributed_world_size == 1:\n raise ValueError('Cannot initialize distributed with distributed_world_size=1')\n\n if torch.distributed.is_initialized():\n warnings.warn('Distributed is already initialized, cannot initialize twice!')\n else:\n print('| distributed init (rank {}): {}'.format(\n args.distributed_rank, args.distributed_init_method), flush=True)\n dist.init_process_group(\n backend=args.distributed_backend,\n init_method=args.distributed_init_method,\n world_size=args.distributed_world_size,\n rank=args.distributed_rank,\n )\n print('| initialized host {} as rank {}'.format(\n socket.gethostname(), args.distributed_rank), flush=True)\n\n # perform a dummy all-reduce to initialize the NCCL communicator\n if torch.cuda.is_available():\n dist.all_reduce(torch.zeros(1).cuda())\n else:\n dist.all_reduce(torch.zeros(1))\n\n suppress_output(is_master(args))\n\n args.distributed_rank = torch.distributed.get_rank()\n return args.distributed_rank\n\n\ndef suppress_output(is_master):\n \"\"\"Suppress printing on the current device. 
Force printing with `force=True`.\"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef get_rank():\n return dist.get_rank()\n\n\ndef get_world_size():\n try:\n return dist.get_world_size()\n except AssertionError:\n return 1\n\ndef get_default_group():\n return dist.group.WORLD\n\n\ndef all_reduce(tensor, op='sum', group=None):\n if group is None:\n group = get_default_group()\n output = dist.all_reduce(tensor, group=group)\n if op == 'mean':\n return output / get_world_size()\n return output\n\ndef all_gather_list(data, group=None, max_size=16384):\n \"\"\"Gathers arbitrary data from all nodes into a list.\n\n Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python\n data. Note that *data* must be picklable.\n\n Args:\n data (Any): data from the local worker to be gathered on other workers\n group (optional): group of the collective\n max_size (int, optional): maximum size of the data to be gathered\n across workers\n \"\"\"\n rank = get_rank()\n world_size = get_world_size()\n\n buffer_size = max_size * world_size\n if not hasattr(all_gather_list, '_buffer') or \\\n all_gather_list._buffer.numel() < buffer_size:\n all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)\n all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()\n buffer = all_gather_list._buffer\n buffer.zero_()\n cpu_buffer = all_gather_list._cpu_buffer\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n header_size = 4 # size of header that contains the length of the encoded data\n size = header_size + enc_size\n if size > max_size:\n raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))\n\n header = struct.pack(\">I\", enc_size)\n cpu_buffer[:size] = torch.ByteTensor(list(header + enc))\n start = rank * max_size\n buffer[start:start + size].copy_(cpu_buffer[:size])\n\n all_reduce(buffer, group=group)\n\n try:\n result = []\n for i in range(world_size):\n out_buffer = buffer[i * max_size:(i + 1) * max_size]\n enc_size, = struct.unpack(\">I\", bytes(out_buffer[:header_size].tolist()))\n if enc_size > 0:\n result.append(pickle.loads(bytes(out_buffer[header_size:header_size + enc_size].tolist())))\n return result\n except pickle.UnpicklingError:\n raise Exception(\n 'Unable to unpickle data from other workers. all_gather_list requires all '\n 'workers to enter the function together, so this error usually indicates '\n 'that the workers have fallen out of sync somehow. Workers can fall out of '\n 'sync if one of them runs out of memory, or if there are other conditions '\n 'in your training script that can cause one worker to finish an epoch '\n 'while other workers are still iterating over their portions of the data.'\n )\n"
] | [
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.init_process_group",
"torch.cuda.ByteTensor",
"torch.distributed.is_initialized",
"torch.ByteTensor",
"torch.cuda.is_available",
"torch.distributed.all_reduce",
"torch.zeros"
]
] |
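all_gather_list() in the row above frames each worker's payload as a 4-byte big-endian length header followed by the pickle bytes, so every rank can slice and decode the other ranks' regions of the shared buffer after the all-reduce. A minimal single-process sketch of just that framing; encode and decode are illustrative names:

import pickle
import struct

def encode(data, max_size: int = 16384) -> bytes:
    enc = pickle.dumps(data)
    if 4 + len(enc) > max_size:            # header + payload must fit one slot
        raise ValueError('encoded data exceeds max_size')
    return struct.pack('>I', len(enc)) + enc

def decode(buf: bytes):
    (size,) = struct.unpack('>I', buf[:4])  # read the big-endian length header
    return pickle.loads(buf[4:4 + size])

assert decode(encode({'rank': 0, 'loss': 1.5})) == {'rank': 0, 'loss': 1.5}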
ntgiwsvp/qiskit-terra | [
"206b8bcc930817d88f8244f7b984880aecde959d"
] | [
"qiskit/circuit/gate.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Unitary gate.\"\"\"\n\nfrom warnings import warn\nfrom typing import List, Optional, Union, Tuple\nimport numpy as np\nfrom scipy.linalg import schur\n\nfrom qiskit.circuit.parameter import ParameterExpression\nfrom qiskit.circuit.exceptions import CircuitError\nfrom .instruction import Instruction\n\n\nclass Gate(Instruction):\n \"\"\"Unitary gate.\"\"\"\n\n def __init__(self, name: str, num_qubits: int, params: List,\n label: Optional[str] = None) -> None:\n \"\"\"Create a new gate.\n\n Args:\n name: The Qobj name of the gate.\n num_qubits: The number of qubits the gate acts on.\n params: A list of parameters.\n label: An optional label for the gate.\n \"\"\"\n self._label = label\n self.definition = None\n super().__init__(name, num_qubits, 0, params)\n\n # Set higher priority than Numpy array and matrix classes\n __array_priority__ = 20\n\n def to_matrix(self) -> np.ndarray:\n \"\"\"Return a Numpy.array for the gate unitary matrix.\n\n Returns:\n np.ndarray: if the Gate subclass has a matrix defintion.\n\n Raises:\n CircuitError: If a Gate subclass does not implement this method an\n exception will be raised when this base class method is called.\n \"\"\"\n if hasattr(self, '__array__'):\n # pylint: disable=no-member\n return self.__array__(dtype=complex)\n raise CircuitError(\"to_matrix not defined for this {}\".format(type(self)))\n\n def power(self, exponent: float):\n \"\"\"Creates a unitary gate as `gate^exponent`.\n\n Args:\n exponent (float): Gate^exponent\n\n Returns:\n qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.\n\n Raises:\n CircuitError: If Gate is not unitary\n \"\"\"\n from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import\n from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import\n # Should be diagonalized because it's a unitary.\n decomposition, unitary = schur(Operator(self).data, output='complex')\n # Raise the diagonal entries to the specified power\n decomposition_power = list()\n\n decomposition_diagonal = decomposition.diagonal()\n # assert off-diagonal are 0\n if not np.allclose(np.diag(decomposition_diagonal), decomposition):\n raise CircuitError('The matrix is not diagonal')\n\n for element in decomposition_diagonal:\n decomposition_power.append(pow(element, exponent))\n # Then reconstruct the resulting gate.\n unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T\n return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))\n\n def _return_repeat(self, exponent: float) -> 'Gate':\n return Gate(name=\"%s*%s\" % (self.name, exponent), num_qubits=self.num_qubits,\n params=self.params)\n\n def assemble(self) -> 'Instruction':\n \"\"\"Assemble a QasmQobjInstruction\"\"\"\n instruction = super().assemble()\n if self.label:\n instruction.label = self.label\n return instruction\n\n @property\n def label(self) -> str:\n \"\"\"Return gate label\"\"\"\n return self._label\n\n @label.setter\n def label(self, name: str):\n \"\"\"Set gate label to name\n\n Args:\n 
name (str or None): label to assign unitary\n\n Raises:\n TypeError: name is not string or None.\n \"\"\"\n if isinstance(name, (str, type(None))):\n self._label = name\n else:\n raise TypeError('label expects a string or None')\n\n def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None,\n ctrl_state: Optional[Union[int, str]] = None):\n \"\"\"Return controlled version of gate. See :class:`.ControlledGate` for usage.\n\n Args:\n num_ctrl_qubits: number of controls to add to gate (default=1)\n label: optional gate label\n ctrl_state: The control state in decimal or as a bitstring\n (e.g. '111'). If None, use 2**num_ctrl_qubits-1.\n\n Returns:\n qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm\n uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size\n num_qubits + 2*num_ctrl_qubits - 1.\n\n Raises:\n QiskitError: unrecognized mode or invalid ctrl_state\n \"\"\"\n # pylint: disable=cyclic-import\n from .add_control import add_control\n return add_control(self, num_ctrl_qubits, label, ctrl_state)\n\n @staticmethod\n def _broadcast_single_argument(qarg: List) -> List:\n \"\"\"Expands a single argument.\n\n For example: [q[0], q[1]] -> [q[0]], [q[1]]\n \"\"\"\n # [q[0], q[1]] -> [q[0]]\n # -> [q[1]]\n for arg0 in qarg:\n yield [arg0], []\n\n @staticmethod\n def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List:\n if len(qarg0) == len(qarg1):\n # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]\n # -> [q[1], r[1]]\n for arg0, arg1 in zip(qarg0, qarg1):\n yield [arg0, arg1], []\n elif len(qarg0) == 1:\n # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]\n # -> [q[0], r[1]]\n for arg1 in qarg1:\n yield [qarg0[0], arg1], []\n elif len(qarg1) == 1:\n # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]\n # -> [q[1], r[0]]\n for arg0 in qarg0:\n yield [arg0, qarg1[0]], []\n else:\n raise CircuitError('Not sure how to combine these two-qubit arguments:\\n %s\\n %s' %\n (qarg0, qarg1))\n\n @staticmethod\n def _broadcast_3_or_more_args(qargs: List) -> List:\n if all(len(qarg) == len(qargs[0]) for qarg in qargs):\n for arg in zip(*qargs):\n yield list(arg), []\n else:\n raise CircuitError(\n 'Not sure how to combine these qubit arguments:\\n %s\\n' % qargs)\n\n def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:\n \"\"\"Validation and handling of the arguments and its relationship.\n\n For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This\n method yields the arguments in the right grouping. In the given example::\n\n in: [[q[0],q[1]], q[2]],[]\n outs: [q[0], q[2]], []\n [q[1], q[2]], []\n\n The general broadcasting rules are:\n\n * If len(qargs) == 1::\n\n [q[0], q[1]] -> [q[0]],[q[1]]\n\n * If len(qargs) == 2::\n\n [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]\n [[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]\n [[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]\n\n * If len(qargs) >= 3::\n\n [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]\n\n Args:\n qargs: List of quantum bit arguments.\n cargs: List of classical bit arguments.\n\n Returns:\n A tuple with single arguments.\n\n Raises:\n CircuitError: If the input is not valid. 
For example, the number of\n arguments does not match the gate expectation.\n \"\"\"\n if len(qargs) != self.num_qubits or cargs:\n raise CircuitError(\n 'The amount of qubit/clbit arguments does not match the gate expectation.')\n\n if any([not qarg for qarg in qargs]):\n raise CircuitError('One or more of the arguments are empty')\n\n if len(qargs) == 1:\n return Gate._broadcast_single_argument(qargs[0])\n elif len(qargs) == 2:\n return Gate._broadcast_2_arguments(qargs[0], qargs[1])\n elif len(qargs) >= 3:\n return Gate._broadcast_3_or_more_args(qargs)\n else:\n raise CircuitError('This gate cannot handle %i arguments' % len(qargs))\n\n def validate_parameter(self, parameter):\n \"\"\"Gate parameters should be int, float, or ParameterExpression\"\"\"\n if isinstance(parameter, ParameterExpression):\n if len(parameter.parameters) > 0:\n return parameter # expression has free parameters, we cannot validate it\n if not parameter._symbol_expr.is_real:\n raise CircuitError(\"Bound parameter expression is complex in gate {}\".format(\n self.name))\n return parameter # per default assume parameters must be real when bound\n if isinstance(parameter, (int, float)):\n return parameter\n elif isinstance(parameter, (np.integer, np.floating)):\n return parameter.item()\n elif isinstance(parameter, np.ndarray):\n warn(\"Gate param type %s is being deprecated as of 0.16.0, and will be removed \"\n \"no earlier than 3 months after that release date. \"\n \"Considering creating your own Gate subclass with the method validate_parameter \"\n \" to allow this param type.\" % type(parameter), DeprecationWarning, 3)\n return parameter\n else:\n raise CircuitError(\"Invalid param type {0} for gate {1}.\".format(type(parameter),\n self.name))\n"
] | [
[
"numpy.diag"
]
] |
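Gate.power() in the row above exploits the fact that a unitary matrix is normal, so its complex Schur form is diagonal: raise the diagonal entries to the exponent and reassemble with the Schur vectors. A standalone sketch of the same computation on a plain NumPy matrix; unitary_power is an illustrative name:

import numpy as np
from scipy.linalg import schur

def unitary_power(u: np.ndarray, exponent: float) -> np.ndarray:
    t, z = schur(u, output='complex')        # u == z @ t @ z.conj().T
    diag = t.diagonal()
    if not np.allclose(np.diag(diag), t):    # sanity check: t must be diagonal
        raise ValueError('matrix is not normal')
    return z @ np.diag(diag ** exponent) @ z.conj().T

x = np.array([[0, 1], [1, 0]], dtype=complex)    # Pauli-X
sqrt_x = unitary_power(x, 0.5)
assert np.allclose(sqrt_x @ sqrt_x, x)           # sqrt(X) squared recovers X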
dynasty-com/NeMo | [
"1ac828df423fbcec1b34c650b3a20266bb133dde",
"1ac828df423fbcec1b34c650b3a20266bb133dde"
] | [
"nemo/collections/asr/models/label_models.py",
"nemo/collections/asr/data/audio_to_text.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport json\nimport os\nimport pickle as pkl\nfrom typing import Dict, List, Optional, Union\n\nimport onnx\nimport torch\nfrom omegaconf import DictConfig\nfrom omegaconf.omegaconf import open_dict\nfrom pytorch_lightning import Trainer\n\nfrom nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataSet\nfrom nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss\nfrom nemo.collections.asr.parts.features import WaveformFeaturizer\nfrom nemo.collections.asr.parts.perturb import process_augmentations\nfrom nemo.collections.common.losses import CrossEntropyLoss as CELoss\nfrom nemo.collections.common.metrics import TopKClassificationAccuracy\nfrom nemo.core.classes import ModelPT\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.classes.exportable import Exportable\nfrom nemo.core.neural_types import *\nfrom nemo.utils import logging\nfrom nemo.utils.export_utils import attach_onnx_to_onnx\n\n__all__ = ['EncDecSpeakerLabelModel', 'ExtractSpeakerEmbeddingsModel']\n\n\nclass EncDecSpeakerLabelModel(ModelPT, Exportable):\n \"\"\"Encoder decoder class for speaker label models.\n Model class creates training, validation methods for setting up data\n performing model forward pass.\n Expects config dict for\n * preprocessor\n * Jasper/Quartznet Encoder\n * Speaker Decoder\n \"\"\"\n\n @classmethod\n def list_available_models(cls) -> List[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"SpeakerNet_recognition\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_recognition.nemo\",\n description=\"SpeakerNet_recognition model trained end-to-end for speaker recognition purposes with cross_entropy loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. Speaker Recognition model achieves 2.65% EER on voxceleb-O cleaned trial file\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"SpeakerNet_verification\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_verification.nemo\",\n description=\"SpeakerNet_verification model trained end-to-end for speaker verification purposes with arcface angular softmax loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. 
Speaker Verification model achieves 2.12% EER on voxceleb-O cleaned trial file\",\n )\n result.append(model)\n\n return result\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n super().__init__(cfg=cfg, trainer=trainer)\n self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(cfg.preprocessor)\n self.encoder = EncDecSpeakerLabelModel.from_config_dict(cfg.encoder)\n self.decoder = EncDecSpeakerLabelModel.from_config_dict(cfg.decoder)\n if 'angular' in cfg.decoder and cfg.decoder['angular']:\n logging.info(\"Training with Angular Softmax Loss\")\n scale = cfg.loss.scale\n margin = cfg.loss.margin\n self.loss = AngularSoftmaxLoss(scale=scale, margin=margin)\n else:\n logging.info(\"Training with Softmax-CrossEntropy loss\")\n self.loss = CELoss()\n\n self._accuracy = TopKClassificationAccuracy(top_k=[1], dist_sync_on_step=True)\n\n def __setup_dataloader_from_config(self, config: Optional[Dict]):\n if 'augmentor' in config:\n augmentor = process_augmentations(config['augmentor'])\n else:\n augmentor = None\n\n featurizer = WaveformFeaturizer(\n sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor\n )\n self.dataset = AudioToSpeechLabelDataSet(\n manifest_filepath=config['manifest_filepath'],\n labels=config['labels'],\n featurizer=featurizer,\n max_duration=config.get('max_duration', None),\n min_duration=config.get('min_duration', None),\n trim=config.get('trim_silence', True),\n load_audio=config.get('load_audio', True),\n time_length=config.get('time_length', 8),\n )\n\n return torch.utils.data.DataLoader(\n dataset=self.dataset,\n batch_size=config['batch_size'],\n collate_fn=self.dataset.fixed_seq_collate_fn,\n drop_last=config.get('drop_last', False),\n shuffle=config['shuffle'],\n num_workers=config.get('num_workers', 2),\n pin_memory=config.get('pin_memory', False),\n )\n\n def setup_training_data(self, train_data_layer_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in train_data_layer_config:\n train_data_layer_config['shuffle'] = True\n self._train_dl = self.__setup_dataloader_from_config(config=train_data_layer_config)\n\n def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in val_data_layer_config:\n val_data_layer_config['shuffle'] = False\n val_data_layer_config['labels'] = self.dataset.labels\n self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config)\n\n def setup_test_data(self, test_data_layer_params: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in test_data_layer_params:\n test_data_layer_params['shuffle'] = False\n if hasattr(self, 'dataset'):\n test_data_layer_params['labels'] = self.dataset.labels\n self.embedding_dir = test_data_layer_params.get('embedding_dir', './')\n self.test_manifest = test_data_layer_params.get('manifest_filepath', None)\n self._test_dl = self.__setup_dataloader_from_config(config=test_data_layer_params)\n\n @property\n def input_types(self) -> Optional[Dict[str, NeuralType]]:\n if hasattr(self.preprocessor, '_sample_rate'):\n audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)\n else:\n audio_eltype = AudioSignal()\n return {\n \"input_signal\": NeuralType(('B', 'T'), audio_eltype),\n \"input_signal_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n return {\n \"logits\": NeuralType(('B', 'D'), LogitsType()),\n \"embs\": NeuralType(('B', 'D'), 
AcousticEncodedRepresentation()),\n }\n\n @typecheck()\n def forward(self, input_signal, input_signal_length):\n processed_signal, processed_signal_len = self.preprocessor(\n input_signal=input_signal, length=input_signal_length,\n )\n\n encoded, _ = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n logits, embs = self.decoder(encoder_output=encoded)\n return logits, embs\n\n # PTL-specific methods\n def training_step(self, batch, batch_idx):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss = self.loss(logits=logits, labels=labels)\n\n self.log('loss', loss)\n self.log('learning_rate', self._optimizer.param_groups[0]['lr'])\n\n self._accuracy(logits=logits, labels=labels)\n top_k = self._accuracy.compute()\n for i, top_i in enumerate(top_k):\n self.log(f'training_batch_accuracy_top@{i}', top_i)\n\n return {'loss': loss}\n\n def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n acc_top_k = self._accuracy(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k\n\n return {\n 'val_loss': loss_value,\n 'val_correct_counts': correct_counts,\n 'val_total_counts': total_counts,\n 'val_acc_top_k': acc_top_k,\n }\n\n def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):\n val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()\n correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]).sum(axis=0)\n total_counts = torch.stack([x['val_total_counts'] for x in outputs]).sum(axis=0)\n\n self._accuracy.correct_counts_k = correct_counts\n self._accuracy.total_counts_k = total_counts\n topk_scores = self._accuracy.compute()\n\n logging.info(\"val_loss: {:.3f}\".format(val_loss_mean))\n self.log('val_loss', val_loss_mean)\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n self.log('val_epoch_accuracy_top@{}'.format(top_k), score)\n\n return {\n 'val_loss': val_loss_mean,\n 'val_acc_top_k': topk_scores,\n }\n\n def test_step(self, batch, batch_idx, dataloader_idx: int = 0):\n audio_signal, audio_signal_len, labels, _ = batch\n logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n loss_value = self.loss(logits=logits, labels=labels)\n acc_top_k = self._accuracy(logits=logits, labels=labels)\n correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k\n\n return {\n 'test_loss': loss_value,\n 'test_correct_counts': correct_counts,\n 'test_total_counts': total_counts,\n 'test_acc_top_k': acc_top_k,\n }\n\n def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n correct_counts = torch.stack([x['test_correct_counts'] for x in outputs]).sum(axis=0)\n total_counts = torch.stack([x['test_total_counts'] for x in outputs]).sum(axis=0)\n\n self._accuracy.correct_counts_k = correct_counts\n self._accuracy.total_counts_k = total_counts\n topk_scores = self._accuracy.compute()\n\n logging.info(\"test_loss: {:.3f}\".format(test_loss_mean))\n self.log('test_loss', test_loss_mean)\n for top_k, score in zip(self._accuracy.top_k, topk_scores):\n self.log('test_epoch_accuracy_top@{}'.format(top_k), 
score)\n\n return {\n 'test_loss': test_loss_mean,\n 'test_acc_top_k': topk_scores,\n }\n\n def setup_finetune_model(self, model_config: DictConfig):\n \"\"\"\n setup_finetune_model method sets up training data, validation data and test data with new\n provided config, this checks for the previous labels set up during training from scratch, if None,\n it sets up labels for provided finetune data from manifest files\n\n Args:\n model_config: cfg which has train_ds, optional validation_ds, optional test_ds and\n mandatory encoder and decoder model params\n make sure you set num_classes correctly for finetune data\n\n Returns: None\n\n \"\"\"\n if hasattr(self, 'dataset'):\n scratch_labels = self.dataset.labels\n else:\n scratch_labels = None\n\n logging.info(\"Setting up data loaders with manifests provided from model_config\")\n\n if 'train_ds' in model_config and model_config.train_ds is not None:\n self.setup_training_data(model_config.train_ds)\n else:\n raise KeyError(\"train_ds is not found in model_config but you need it for fine tuning\")\n\n if self.dataset.labels is None or len(self.dataset.labels) == 0:\n raise ValueError(f'New labels must be non-empty list of labels. But I got: {self.dataset.labels}')\n\n if 'validation_ds' in model_config and model_config.validation_ds is not None:\n self.setup_multiple_validation_data(model_config.validation_ds)\n\n if 'test_ds' in model_config and model_config.test_ds is not None:\n self.setup_multiple_test_data(model_config.test_ds)\n\n if scratch_labels == self.dataset.labels: # checking for new finetune dataset labels\n logging.warning(\n \"Trained dataset labels are same as finetune dataset labels -- continuing change of decoder parameters\"\n )\n elif scratch_labels is None:\n logging.warning(\n \"Either you provided a dummy manifest file during training from scratch or you restored from a pretrained nemo file\"\n )\n\n decoder_config = model_config.decoder\n new_decoder_config = copy.deepcopy(decoder_config)\n if new_decoder_config['num_classes'] != len(self.dataset.labels):\n raise ValueError(\n \"number of classes provided {} is not same as number of different labels in finetuning data: {}\".format(\n new_decoder_config['num_classes'], len(self.dataset.labels)\n )\n )\n\n del self.decoder\n self.decoder = EncDecSpeakerLabelModel.from_config_dict(new_decoder_config)\n\n with open_dict(self._cfg.decoder):\n self._cfg.decoder = new_decoder_config\n\n logging.info(f\"Changed decoder output to # {self.decoder._num_classes} classes.\")\n\n def export(\n self,\n output: str,\n input_example=None,\n output_example=None,\n verbose=False,\n export_params=True,\n do_constant_folding=True,\n keep_initializers_as_inputs=False,\n onnx_opset_version: int = 12,\n try_script: bool = False,\n set_eval: bool = True,\n check_trace: bool = True,\n use_dynamic_axes: bool = True,\n ):\n if input_example is not None or output_example is not None:\n logging.warning(\n \"Passed input and output examples will be ignored and recomputed since\"\n \" EncDecSpeakerModel consists of two separate models (encoder and decoder) with different\"\n \" inputs and outputs.\"\n )\n\n encoder_onnx = self.encoder.export(\n os.path.join(os.path.dirname(output), 'encoder_' + os.path.basename(output)),\n None, # computed by input_example()\n None,\n verbose,\n export_params,\n do_constant_folding,\n keep_initializers_as_inputs,\n onnx_opset_version,\n try_script,\n set_eval,\n check_trace,\n use_dynamic_axes,\n )\n\n decoder_onnx = self.decoder.export(\n 
os.path.join(os.path.dirname(output), 'decoder_' + os.path.basename(output)),\n None, # computed by input_example()\n None,\n verbose,\n export_params,\n do_constant_folding,\n keep_initializers_as_inputs,\n onnx_opset_version,\n try_script,\n set_eval,\n check_trace,\n use_dynamic_axes,\n )\n\n output_model = attach_onnx_to_onnx(encoder_onnx, decoder_onnx, \"SL\")\n onnx.save(output_model, output)\n\n\nclass ExtractSpeakerEmbeddingsModel(EncDecSpeakerLabelModel):\n \"\"\"\n This Model class facilitates extraction of speaker embeddings from a pretrained model.\n Respective embedding file is saved in self.embedding dir passed through cfg\n \"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n super().__init__(cfg=cfg, trainer=trainer)\n\n def test_step(self, batch, batch_ix):\n audio_signal, audio_signal_len, labels, slices = batch\n _, embs = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)\n return {'embs': embs, 'labels': labels, 'slices': slices}\n\n def test_epoch_end(self, outputs):\n embs = torch.cat([x['embs'] for x in outputs])\n slices = torch.cat([x['slices'] for x in outputs])\n emb_shape = embs.shape[-1]\n embs = embs.view(-1, emb_shape).cpu().numpy()\n out_embeddings = {}\n start_idx = 0\n with open(self.test_manifest, 'r') as manifest:\n for idx, line in enumerate(manifest.readlines()):\n line = line.strip()\n dic = json.loads(line)\n structure = dic['audio_filepath'].split('/')[-3:]\n uniq_name = '@'.join(structure)\n if uniq_name in out_embeddings:\n raise KeyError(\"Embeddings for label {} already present in emb dictionary\".format(uniq_name))\n num_slices = slices[idx]\n end_idx = start_idx + num_slices\n out_embeddings[uniq_name] = embs[start_idx:end_idx].mean(axis=0)\n start_idx = end_idx\n\n embedding_dir = os.path.join(self.embedding_dir, 'embeddings')\n if not os.path.exists(embedding_dir):\n os.mkdir(embedding_dir)\n\n prefix = self.test_manifest.split('/')[-1].split('.')[-2]\n\n name = os.path.join(embedding_dir, prefix)\n pkl.dump(out_embeddings, open(name + '_embeddings.pkl', 'wb'))\n logging.info(\"Saved embedding files to {}\".format(embedding_dir))\n\n return {}\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport io\nimport os\nfrom typing import Callable, Dict, List, Optional, Union\n\nimport braceexpand\nimport torch\nimport webdataset as wd\nfrom torch.nn import functional as F\n\nfrom nemo.collections.asr.data import vocabs\nfrom nemo.collections.asr.parts import collections, parsers\nfrom nemo.collections.asr.parts.features import WaveformFeaturizer\nfrom nemo.core.classes import Dataset, IterableDataset\nfrom nemo.core.neural_types import *\nfrom nemo.utils import logging\nfrom nemo.utils.decorators import experimental\n\n__all__ = [\n 'AudioToCharDataset',\n 'AudioToCharWithDursDataset',\n 'AudioToBPEDataset',\n 'AudioLabelDataset',\n 'TarredAudioToCharDataset',\n 'TarredAudioToBPEDataset',\n]\n\n\ndef _speech_collate_fn(batch, pad_id):\n \"\"\"collate batch of audio sig, audio len, tokens, tokens len\n Args:\n batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,\n LongTensor): A tuple of tuples of signal, signal lengths,\n encoded tokens, and encoded tokens length. This collate func\n assumes the signals are 1d torch tensors (i.e. mono audio).\n \"\"\"\n _, audio_lengths, _, tokens_lengths = zip(*batch)\n max_audio_len = 0\n has_audio = audio_lengths[0] is not None\n if has_audio:\n max_audio_len = max(audio_lengths).item()\n max_tokens_len = max(tokens_lengths).item()\n\n audio_signal, tokens = [], []\n for sig, sig_len, tokens_i, tokens_i_len in batch:\n if has_audio:\n sig_len = sig_len.item()\n if sig_len < max_audio_len:\n pad = (0, max_audio_len - sig_len)\n sig = torch.nn.functional.pad(sig, pad)\n audio_signal.append(sig)\n tokens_i_len = tokens_i_len.item()\n if tokens_i_len < max_tokens_len:\n pad = (0, max_tokens_len - tokens_i_len)\n tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)\n tokens.append(tokens_i)\n\n if has_audio:\n audio_signal = torch.stack(audio_signal)\n audio_lengths = torch.stack(audio_lengths)\n else:\n audio_signal, audio_lengths = None, None\n tokens = torch.stack(tokens)\n tokens_lengths = torch.stack(tokens_lengths)\n\n return audio_signal, audio_lengths, tokens, tokens_lengths\n\n\nclass _AudioTextDataset(Dataset):\n \"\"\"\n Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations (in seconds).\n Each new line is a different sample. Example below:\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text_filepath\": \"/path/to/audio.txt\", \"duration\": 23.147}\n ...\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text\": \"the transcription\", \"offset\": 301.75, \"duration\": 0.82, \"utt\":\n \"utterance_id\", \"ctm_utt\": \"en_4156\", \"side\": \"A\"}\n Args:\n manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.\n labels: String containing all the possible characters to map to\n sample_rate (int): Sample rate to resample loaded audio to\n int_values (bool): If true, load samples as 32-bit integers. 
Defauts to False.\n augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor object used to augment loaded\n audio\n max_duration: If audio exceeds this length, do not include in dataset\n min_duration: If audio is less than this length, do not include in dataset\n max_utts: Limit number of utterances\n blank_index: blank character index, default = -1\n unk_index: unk_character index, default = -1\n normalize: whether to normalize transcript text (default): True\n bos_id: Id of beginning of sequence symbol to append if not None\n eos_id: Id of end of sequence symbol to append if not None\n load_audio: Boolean flag indicate whether do or not load audio\n add_misc: True if add additional info dict.\n \"\"\"\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n 'audio_signal': NeuralType(\n ('B', 'T'),\n AudioSignal(freq=self._sample_rate) # TODO: self._sample_rate is not defined anywhere\n if self is not None and hasattr(self, '_sample_rate')\n else AudioSignal(),\n ),\n 'a_sig_length': NeuralType(tuple('B'), LengthsType()),\n 'transcripts': NeuralType(('B', 'T'), LabelsType()),\n 'transcript_length': NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n manifest_filepath: str,\n parser: Union[str, Callable],\n sample_rate: int,\n int_values: bool = False,\n augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,\n max_duration: Optional[int] = None,\n min_duration: Optional[int] = None,\n max_utts: int = 0,\n trim: bool = False,\n bos_id: Optional[int] = None,\n eos_id: Optional[int] = None,\n pad_id: int = 0,\n load_audio: bool = True,\n add_misc: bool = False,\n ):\n self.parser = parser\n\n self.collection = collections.ASRAudioText(\n manifests_files=manifest_filepath.split(','),\n parser=parser,\n min_duration=min_duration,\n max_duration=max_duration,\n max_number=max_utts,\n )\n\n self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)\n self.trim = trim\n self.eos_id = eos_id\n self.bos_id = bos_id\n self.pad_id = pad_id\n self.load_audio = load_audio\n self._add_misc = add_misc\n\n def __getitem__(self, index):\n sample = self.collection[index]\n if self.load_audio:\n offset = sample.offset\n\n if offset is None:\n offset = 0\n\n features = self.featurizer.process(\n sample.audio_file, offset=offset, duration=sample.duration, trim=self.trim, orig_sr=sample.orig_sr\n )\n f, fl = features, torch.tensor(features.shape[0]).long()\n else:\n f, fl = None, None\n\n t, tl = sample.text_tokens, len(sample.text_tokens)\n if self.bos_id is not None:\n t = [self.bos_id] + t\n tl += 1\n if self.eos_id is not None:\n t = t + [self.eos_id]\n tl += 1\n\n output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long()\n\n if self._add_misc:\n misc = dict()\n misc['id'] = sample.id\n misc['text_raw'] = sample.text_raw\n misc['speaker'] = sample.speaker\n output = (output, misc)\n\n return output\n\n def __len__(self):\n return len(self.collection)\n\n def _collate_fn(self, batch):\n return _speech_collate_fn(batch, pad_id=self.pad_id)\n\n\n@experimental\nclass AudioToCharDataset(_AudioTextDataset):\n \"\"\"\n Dataset that loads tensors via a json file containing paths to audio\n files, transcripts, and durations (in seconds). Each new line is a\n different sample. 
Example below:\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text_filepath\":\n \"/path/to/audio.txt\", \"duration\": 23.147}\n ...\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text\": \"the\n transcription\", \"offset\": 301.75, \"duration\": 0.82, \"utt\":\n \"utterance_id\", \"ctm_utt\": \"en_4156\", \"side\": \"A\"}\n Args:\n manifest_filepath: Path to manifest json as described above. Can\n be comma-separated paths.\n labels: String containing all the possible characters to map to\n sample_rate (int): Sample rate to resample loaded audio to\n int_values (bool): If true, load samples as 32-bit integers. Defauts to False.\n augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor\n object used to augment loaded audio\n max_duration: If audio exceeds this length, do not include in dataset\n min_duration: If audio is less than this length, do not include\n in dataset\n max_utts: Limit number of utterances\n blank_index: blank character index, default = -1\n unk_index: unk_character index, default = -1\n normalize: whether to normalize transcript text (default): True\n bos_id: Id of beginning of sequence symbol to append if not None\n eos_id: Id of end of sequence symbol to append if not None\n load_audio: Boolean flag indicate whether do or not load audio\n add_misc: True if add additional info dict.\n \"\"\"\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n 'audio_signal': NeuralType(\n ('B', 'T'),\n AudioSignal(freq=self._sample_rate)\n if self is not None and hasattr(self, '_sample_rate')\n else AudioSignal(),\n ),\n 'a_sig_length': NeuralType(tuple('B'), LengthsType()),\n 'transcripts': NeuralType(('B', 'T'), LabelsType()),\n 'transcript_length': NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n manifest_filepath: str,\n labels: Union[str, List[str]],\n sample_rate: int,\n int_values: bool = False,\n augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,\n max_duration: Optional[float] = None,\n min_duration: Optional[float] = None,\n max_utts: int = 0,\n blank_index: int = -1,\n unk_index: int = -1,\n normalize: bool = True,\n trim: bool = False,\n bos_id: Optional[int] = None,\n eos_id: Optional[int] = None,\n pad_id: int = 0,\n load_audio: bool = True,\n parser: Union[str, Callable] = 'en',\n add_misc: bool = False,\n ):\n self.labels = labels\n\n parser = parsers.make_parser(\n labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize\n )\n\n super().__init__(\n manifest_filepath=manifest_filepath,\n parser=parser,\n sample_rate=sample_rate,\n int_values=int_values,\n augmentor=augmentor,\n max_duration=max_duration,\n min_duration=min_duration,\n max_utts=max_utts,\n trim=trim,\n bos_id=bos_id,\n eos_id=eos_id,\n pad_id=pad_id,\n load_audio=load_audio,\n add_misc=add_misc,\n )\n\n\nclass AudioToCharWithDursDataset(AudioToCharDataset):\n \"\"\"\n Dataset that loads tensors via a json file containing paths to audio\n files, transcripts, and durations (in seconds). Each new line is a\n different sample. 
Example below:\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text_filepath\":\n \"/path/to/audio.txt\", \"duration\": 23.147}\n ...\n {\"audio_filepath\": \"/path/to/audio.wav\", \"text\": \"the\n transcription\", \"offset\": 301.75, \"duration\": 0.82, \"utt\":\n \"utterance_id\", \"ctm_utt\": \"en_4156\", \"side\": \"A\"}\n\n Additionally, user provides path to precomputed durations, which is a pickled python dict with 'tags' and 'durs'\n keys, both of which are list of examples values. Tag is a unique example identifier, which is a wav filename\n without suffix. Durations are an additional tuple of two tensors: graphemes durations and blanks durations.\n Example below:\n {'tags': ['LJ050-0234', 'LJ019-0373'],\n 'durs': [(graphemes_durs0, blanks_durs0), (graphemes_durs1, blanks_durs1)]}\n\n Args:\n **kwargs: Passed to AudioToCharDataset constructor.\n durs_path (str): String path to pickled list of '[(tag, durs)]' durations location.\n rep (bool): True if repeat text graphemes according to durs.\n vocab: Vocabulary config (parser + set of graphemes to use). Constructor propagates these to\n `self.make_vocab` function call to build a complete vocabulary.\n \"\"\"\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n \"\"\"Returns definitions of module output ports.\"\"\"\n return {\n 'audio': NeuralType(\n ('B', 'T'),\n AudioSignal(freq=self._sample_rate)\n if self is not None and hasattr(self, '_sample_rate')\n else AudioSignal(),\n ),\n 'audio_len': NeuralType(('B',), LengthsType()),\n 'text': NeuralType(('B', 'T'), LabelsType()),\n 'text_len': NeuralType(('B',), LengthsType()),\n 'durs': NeuralType(('B', 'T'), LengthsType()),\n }\n\n @staticmethod\n def make_vocab(notation='chars', punct=True, spaces=False, stresses=False):\n \"\"\"Constructs vocabulary from given parameters.\n\n Args:\n notation (str): Either 'chars' or 'phonemes' as general notation.\n punct (bool): True if reserve grapheme for basic punctuation.\n spaces (bool): True if prepend spaces to every punctuation symbol.\n stresses (bool): True if use phonemes codes with stresses (0-2).\n\n Returns:\n (vocabs.Base) Vocabulary\n \"\"\"\n if notation == 'chars':\n vocab = vocabs.Chars(punct=punct, spaces=spaces)\n elif notation == 'phonemes':\n vocab = vocabs.Phonemes(punct=punct, stresses=stresses, spaces=spaces)\n else:\n raise ValueError(\"Unsupported vocab type.\")\n return vocab\n\n def __init__(self, **kwargs):\n durs_path = kwargs.pop('durs_path')\n rep = kwargs.pop('rep', False)\n self.vocab = self.make_vocab(**kwargs.pop('vocab', {}))\n kwargs.setdefault('labels', [])\n\n super().__init__(**kwargs)\n\n pth = torch.load(durs_path)\n tag2d = dict(zip(pth['tags'], pth['durs']))\n durs = []\n for i, e in enumerate(self.collection):\n tag = os.path.splitext(os.path.basename(e.audio_file))[0]\n durs.append(tag2d[tag])\n self.durs = durs\n self.rep = rep\n\n def __getitem__(self, item):\n sample = self.collection[item]\n audio, audio_len, _, _ = super().__getitem__(item) # noqa\n text = self.vocab.encode(sample.text_raw)\n text, text_len = torch.tensor(text).long(), torch.tensor(len(text)).long()\n blanks_durs, graphemes_durs = self.durs[item]\n\n return (\n audio,\n audio_len,\n text,\n text_len,\n blanks_durs,\n graphemes_durs,\n )\n\n @staticmethod\n def _merge(tensors, dim=0, value=0, dtype=None):\n \"\"\"Merges list of tensors into one.\"\"\"\n tensors = [tensor if isinstance(tensor, torch.Tensor) else torch.tensor(tensor) for tensor in tensors]\n dim = dim if dim != -1 else 
len(tensors[0].shape) - 1\n dtype = tensors[0].dtype if dtype is None else dtype\n max_len = max(tensor.shape[dim] for tensor in tensors)\n new_tensors = []\n for tensor in tensors:\n pad = (2 * len(tensor.shape)) * [0]\n pad[-2 * dim - 1] = max_len - tensor.shape[dim]\n new_tensors.append(F.pad(tensor, pad=pad, value=value))\n return torch.stack(new_tensors).to(dtype=dtype)\n\n @staticmethod\n def _interleave(x, y):\n \"\"\"Interleave two tensors.\"\"\"\n xy = torch.stack([x[:-1], y], dim=1).view(-1)\n xy = F.pad(xy, pad=[0, 1], value=x[-1])\n return xy\n\n def _collate_fn(self, batch):\n batch = list(zip(*batch))\n\n asr_batch = _speech_collate_fn(list(zip(*batch[:4])), pad_id=self.vocab.pad)\n audio, audio_len, text, text_len = asr_batch\n\n text = [\n self._interleave(\n x=torch.empty(len(t) + 1, dtype=torch.long, device=t.device,).fill_(self.vocab.blank), y=t,\n )\n for t in text\n ]\n text = self._merge(text, value=self.vocab.pad, dtype=torch.long)\n text_len = text_len * 2 + 1\n\n blanks_durs, graphemes_durs = batch[4:]\n durs = [self._interleave(b, c) for b, c in zip(blanks_durs, graphemes_durs)]\n durs = self._merge(durs, dtype=torch.long).to(text.device)\n\n if self.rep:\n text = self._merge(\n tensors=[torch.repeat_interleave(text1, durs1) for text1, durs1 in zip(text, durs)], dtype=torch.long,\n )\n text_len = durs.sum(-1)\n\n return (\n audio,\n audio_len,\n text,\n text_len,\n durs,\n )\n\n\n@experimental\nclass AudioToBPEDataset(_AudioTextDataset):\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n 'audio_signal': NeuralType(\n ('B', 'T'),\n AudioSignal(freq=self._sample_rate)\n if self is not None and hasattr(self, '_sample_rate')\n else AudioSignal(),\n ),\n 'a_sig_length': NeuralType(tuple('B'), LengthsType()),\n 'transcripts': NeuralType(('B', 'T'), LabelsType()),\n 'transcript_length': NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n manifest_filepath: str,\n tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',\n sample_rate: int,\n int_values: bool = False,\n augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,\n max_duration: Optional[int] = None,\n min_duration: Optional[int] = None,\n max_utts: int = 0,\n trim: bool = False,\n load_audio: bool = True,\n add_misc: bool = False,\n use_start_end_token: bool = True,\n ):\n if use_start_end_token and hasattr(tokenizer, 'bos_token'):\n bos_id = tokenizer.bos_id\n else:\n bos_id = None\n\n if use_start_end_token and hasattr(tokenizer, 'eos_token'):\n eos_id = tokenizer.eos_id\n else:\n eos_id = None\n\n if hasattr(tokenizer, 'pad_token'):\n pad_id = tokenizer.pad_id\n else:\n pad_id = 0\n\n class TokenizerWrapper:\n def __init__(self, tokenizer):\n self._tokenizer = tokenizer\n\n def __call__(self, text):\n t = self._tokenizer.text_to_ids(text)\n return t\n\n super().__init__(\n manifest_filepath=manifest_filepath,\n parser=TokenizerWrapper(tokenizer),\n sample_rate=sample_rate,\n int_values=int_values,\n augmentor=augmentor,\n max_duration=max_duration,\n min_duration=min_duration,\n max_utts=max_utts,\n bos_id=bos_id,\n eos_id=eos_id,\n pad_id=pad_id,\n trim=trim,\n load_audio=load_audio,\n add_misc=add_misc,\n )\n\n\n# Ported from https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/data/speech2text/speech_commands.py\n@experimental\nclass AudioLabelDataset(Dataset):\n \"\"\"\n Dataset that loads tensors via a json file containing paths to audio\n files, command 
class, and durations (in seconds). Each new line is a\n different sample. Example below:\n {\"audio_filepath\": \"/path/to/audio.wav\", \"label\":\n \"label\", \"duration\": 23.147}\n ...\n {\"audio_filepath\": \"/path/to/audio.wav\", \"label\": \"label\",\n \"offset\": 301.75, \"duration\": 0.82}\n Args:\n manifest_filepath: Path to manifest json as described above. Can\n be comma-separated paths.\n labels (Optional[list]): String containing all the possible labels to map to\n if None then automatically picks from ASRSpeechLabel collection.\n featurizer: Initialized featurizer class that converts paths of\n audio to feature tensors\n max_duration: If audio exceeds this length, do not include in dataset\n min_duration: If audio is less than this length, do not include\n in dataset\n trim: Boolean flag whether to trim the audio\n load_audio: Boolean flag indicate whether do or not load audio\n \"\"\"\n\n def __init__(\n self,\n manifest_filepath,\n featurizer,\n labels=None,\n max_duration=None,\n min_duration=None,\n trim=False,\n load_audio=True,\n ):\n self.collection = collections.ASRSpeechLabel(\n manifests_files=manifest_filepath.split(','), min_duration=min_duration, max_duration=max_duration\n )\n\n self.featurizer = featurizer\n self.trim = trim\n self.load_audio = load_audio\n\n self.labels = labels if labels else self.collection.uniq_labels\n self.num_commands = len(self.labels)\n\n self.label2id, self.id2label = {}, {}\n for label_id, label in enumerate(self.labels):\n self.label2id[label] = label_id\n self.id2label[label_id] = label\n\n def __getitem__(self, index):\n sample = self.collection[index]\n if self.load_audio:\n offset = sample.offset\n\n if offset is None:\n offset = 0\n\n features = self.featurizer.process(\n sample.audio_file, offset=offset, duration=sample.duration, trim=self.trim\n )\n f, fl = features, torch.tensor(features.shape[0]).long()\n else:\n f, fl = None, None\n\n t = self.label2id[sample.label]\n tl = 1 # For compatibility with collate_fn used later\n\n return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()\n\n def __len__(self):\n return len(self.collection)\n\n def _collate_fn(self, batch):\n \"\"\"collate batch of audio sig, audio len, tokens (single token), tokens len (1)\n Args:\n batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,\n LongTensor): A tuple of tuples of signal, signal lengths,\n encoded tokens, and encoded tokens length. This collate func\n assumes the signals are 1d torch tensors (i.e. mono audio).\n \"\"\"\n return _speech_collate_fn(batch, pad_id=0)\n\n\n@experimental\nclass _TarredAudioToTextDataset(IterableDataset):\n \"\"\"\n A similar Dataset to the AudioToCharDataset/AudioToBPEDataset, but which loads tarred audio files.\n\n Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset/AudioToBPEDataset),\n as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should\n contain the information for one audio file, including at least the transcript and name of the audio\n file within the tarball.\n\n Valid formats for the audio_tar_filepaths argument include:\n (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or\n (2) a list of file paths that will not be brace-expanded, e.g. 
['audio_1.tar', 'audio_2.tar', ...].\n\n Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.\n This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.\n Supported opening braces - { <=> (, [, < and the special tag _OP_.\n Supported closing braces - } <=> ), ], > and the special tag _CL_.\n For SLURM based tasks, we suggest the use of the special tags for ease of use.\n\n See the WebDataset documentation for more information about accepted data and input formats.\n\n If using multiple processes the number of shards should be divisible by the number of workers to ensure an\n even split among workers. If it is not divisible, logging will give a warning but training will proceed.\n In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering\n is applied. We currently do not check for this, but your program may hang if the shards are uneven!\n\n Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been\n replaced by shuffle_n (int).\n\n Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest\n after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.\n\n Args:\n audio_tar_filepaths: Either a list of audio tarball filepaths, or a\n string (can be brace-expandable).\n manifest_filepath (str): Path to the manifest.\n parser (callable): A callable which is used to pre-process the text output.\n sample_rate (int): Sample rate to resample loaded audio to\n int_values (bool): If true, load samples as 32-bit integers. Defauts to False.\n augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor\n object used to augment loaded audio\n shuffle_n (int): How many samples to look ahead and load to be shuffled.\n See WebDataset documentation for more details.\n Defaults to 0.\n min_duration (float): Dataset parameter.\n All training files which have a duration less than min_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to 0.1.\n max_duration (float): Dataset parameter.\n All training files which have a duration more than max_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to None.\n max_utts (int): Limit number of utterances. 0 means no maximum.\n blank_index (int): Blank character index, defaults to -1.\n unk_index (int): Unknown character index, defaults to -1.\n normalize (bool): Dataset parameter.\n Whether to use automatic text cleaning.\n It is highly recommended to manually clean text for best results.\n Defaults to True.\n trim (bool): Whether to use trim silence from beginning and end\n of audio signal using librosa.effects.trim().\n Defaults to False.\n bos_id (id): Dataset parameter.\n Beginning of string symbol id used for seq2seq models.\n Defaults to None.\n eos_id (id): Dataset parameter.\n End of string symbol id used for seq2seq models.\n Defaults to None.\n pad_id (id): Token used to pad when collating samples in batches.\n If this is None, pads using 0s.\n Defaults to None.\n global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.\n world_size (int): Total number of processes, used for partitioning shards. 
Defaults to 0.\n \"\"\"\n\n def __init__(\n self,\n audio_tar_filepaths: Union[str, List[str]],\n manifest_filepath: str,\n parser: Callable,\n sample_rate: int,\n int_values: bool = False,\n augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,\n shuffle_n: int = 0,\n min_duration: Optional[float] = None,\n max_duration: Optional[float] = None,\n max_utts: int = 0,\n trim: bool = False,\n bos_id: Optional[int] = None,\n eos_id: Optional[int] = None,\n add_misc: bool = False,\n pad_id: int = 0,\n global_rank: int = 0,\n world_size: int = 0,\n ):\n self.collection = collections.ASRAudioText(\n manifests_files=manifest_filepath.split(','),\n parser=parser,\n min_duration=min_duration,\n max_duration=max_duration,\n max_number=max_utts,\n index_by_file_id=True, # Must set this so the manifest lines can be indexed by file ID\n )\n\n self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)\n self.trim = trim\n self.eos_id = eos_id\n self.bos_id = bos_id\n self.pad_id = pad_id\n self._add_misc = add_misc\n\n if isinstance(audio_tar_filepaths, str):\n # Replace '(' and '[' with '{'\n brace_keys_open = ['(', '[', '<', '_OP_']\n for bkey in brace_keys_open:\n if bkey in audio_tar_filepaths:\n audio_tar_filepaths = audio_tar_filepaths.replace(bkey, \"{\")\n\n # Replace ')' and ']' with '}'\n brace_keys_close = [')', ']', '>', '_CL_']\n for bkey in brace_keys_close:\n if bkey in audio_tar_filepaths:\n audio_tar_filepaths = audio_tar_filepaths.replace(bkey, \"}\")\n\n # Check for distributed and partition shards accordingly\n if world_size > 1:\n if isinstance(audio_tar_filepaths, str):\n # Brace expand\n audio_tar_filepaths = list(braceexpand.braceexpand(audio_tar_filepaths))\n\n if len(audio_tar_filepaths) % world_size != 0:\n logging.warning(\n f\"Number of shards in tarred dataset ({len(audio_tar_filepaths)}) is not divisible \"\n f\"by number of distributed workers ({world_size}).\"\n )\n\n begin_idx = (len(audio_tar_filepaths) // world_size) * global_rank\n end_idx = begin_idx + (len(audio_tar_filepaths) // world_size)\n audio_tar_filepaths = audio_tar_filepaths[begin_idx:end_idx]\n logging.info(\n \"Partitioning tarred dataset: process (%d) taking shards [%d, %d)\", global_rank, begin_idx, end_idx\n )\n\n # Put together WebDataset\n self._dataset = (\n wd.Dataset(audio_tar_filepaths)\n .shuffle(shuffle_n)\n .rename(audio='wav', key='__key__')\n .to_tuple('audio', 'key')\n .pipe(self._filter)\n .map(f=self._build_sample)\n )\n\n def _filter(self, iterator):\n \"\"\"This function is used to remove samples that have been filtered out by ASRAudioText already.\n Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample\n that was filtered out (e.g. 
for duration).\n Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,\n which may make your code hang as one process will finish before the other.\n \"\"\"\n\n class TarredAudioFilter:\n def __init__(self, collection):\n self.iterator = iterator\n self.collection = collection\n\n def __iter__(self):\n return self\n\n def __next__(self):\n while True:\n audio_bytes, audio_filename = next(self.iterator)\n file_id, _ = os.path.splitext(os.path.basename(audio_filename))\n if file_id in self.collection.mapping:\n return audio_bytes, audio_filename\n\n return TarredAudioFilter(self.collection)\n\n def _collate_fn(self, batch):\n return _speech_collate_fn(batch, self.pad_id)\n\n def _build_sample(self, tup):\n \"\"\"Builds the training sample by combining the data from the WebDataset with the manifest info.\n \"\"\"\n audio_bytes, audio_filename = tup\n\n # Grab manifest entry from self.collection\n file_id, _ = os.path.splitext(os.path.basename(audio_filename))\n manifest_idx = self.collection.mapping[file_id]\n manifest_entry = self.collection[manifest_idx]\n\n offset = manifest_entry.offset\n if offset is None:\n offset = 0\n\n # Convert audio bytes to IO stream for processing (for SoundFile to read)\n audio_filestream = io.BytesIO(audio_bytes)\n features = self.featurizer.process(\n audio_filestream,\n offset=offset,\n duration=manifest_entry.duration,\n trim=self.trim,\n orig_sr=manifest_entry.orig_sr,\n )\n audio_filestream.close()\n\n # Audio features\n f, fl = features, torch.tensor(features.shape[0]).long()\n\n # Text features\n t, tl = manifest_entry.text_tokens, len(manifest_entry.text_tokens)\n if self.bos_id is not None:\n t = [self.bos_id] + t\n tl += 1\n if self.eos_id is not None:\n t = t + [self.eos_id]\n tl += 1\n\n return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()\n\n def __iter__(self):\n return self._dataset.__iter__()\n\n def __len__(self):\n return len(self.collection)\n\n\n@experimental\nclass TarredAudioToCharDataset(_TarredAudioToTextDataset):\n \"\"\"\n A similar Dataset to the AudioToCharDataset, but which loads tarred audio files.\n\n Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset),\n as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should\n contain the information for one audio file, including at least the transcript and name of the audio\n file within the tarball.\n\n Valid formats for the audio_tar_filepaths argument include:\n (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or\n (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].\n\n See the WebDataset documentation for more information about accepted data and input formats.\n\n If using multiple processes the number of shards should be divisible by the number of workers to ensure an\n even split among workers. If it is not divisible, logging will give a warning but training will proceed.\n In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering\n is applied. 
We currently do not check for this, but your program may hang if the shards are uneven!\n\n Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been\n replaced by shuffle_n (int).\n\n Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest\n after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.\n\n Args:\n audio_tar_filepaths: Either a list of audio tarball filepaths, or a\n string (can be brace-expandable).\n manifest_filepath (str): Path to the manifest.\n labels (list): List of characters that can be output by the ASR model.\n For Jasper, this is the 28 character set {a-z '}. The CTC blank\n symbol is automatically added later for models using ctc.\n sample_rate (int): Sample rate to resample loaded audio to\n int_values (bool): If true, load samples as 32-bit integers. Defauts to False.\n augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor\n object used to augment loaded audio\n shuffle_n (int): How many samples to look ahead and load to be shuffled.\n See WebDataset documentation for more details.\n Defaults to 0.\n min_duration (float): Dataset parameter.\n All training files which have a duration less than min_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to 0.1.\n max_duration (float): Dataset parameter.\n All training files which have a duration more than max_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to None.\n max_utts (int): Limit number of utterances. 0 means no maximum.\n blank_index (int): Blank character index, defaults to -1.\n unk_index (int): Unknown character index, defaults to -1.\n normalize (bool): Dataset parameter.\n Whether to use automatic text cleaning.\n It is highly recommended to manually clean text for best results.\n Defaults to True.\n trim (bool): Whether to use trim silence from beginning and end\n of audio signal using librosa.effects.trim().\n Defaults to False.\n bos_id (id): Dataset parameter.\n Beginning of string symbol id used for seq2seq models.\n Defaults to None.\n eos_id (id): Dataset parameter.\n End of string symbol id used for seq2seq models.\n Defaults to None.\n pad_id (id): Token used to pad when collating samples in batches.\n If this is None, pads using 0s.\n Defaults to None.\n global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.\n world_size (int): Total number of processes, used for partitioning shards. 
Defaults to 0.\n \"\"\"\n\n def __init__(\n self,\n audio_tar_filepaths: Union[str, List[str]],\n manifest_filepath: str,\n labels: List[str],\n sample_rate: int,\n int_values: bool = False,\n augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,\n shuffle_n: int = 0,\n min_duration: Optional[float] = None,\n max_duration: Optional[float] = None,\n max_utts: int = 0,\n blank_index: int = -1,\n unk_index: int = -1,\n normalize: bool = True,\n trim: bool = False,\n bos_id: Optional[int] = None,\n eos_id: Optional[int] = None,\n parser: Optional[str] = 'en',\n add_misc: bool = False,\n pad_id: int = 0,\n global_rank: int = 0,\n world_size: int = 0,\n ):\n self.labels = labels\n\n parser = parsers.make_parser(\n labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize\n )\n\n super().__init__(\n audio_tar_filepaths=audio_tar_filepaths,\n manifest_filepath=manifest_filepath,\n parser=parser,\n sample_rate=sample_rate,\n int_values=int_values,\n augmentor=augmentor,\n shuffle_n=shuffle_n,\n min_duration=min_duration,\n max_duration=max_duration,\n max_utts=max_utts,\n trim=trim,\n bos_id=bos_id,\n eos_id=eos_id,\n add_misc=add_misc,\n pad_id=pad_id,\n global_rank=global_rank,\n world_size=world_size,\n )\n\n\n@experimental\nclass TarredAudioToBPEDataset(_TarredAudioToTextDataset):\n \"\"\"\n A similar Dataset to the AudioToBPEDataset, but which loads tarred audio files.\n\n Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToBPEDataset),\n as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should\n contain the information for one audio file, including at least the transcript and name of the audio\n file within the tarball.\n\n Valid formats for the audio_tar_filepaths argument include:\n (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or\n (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].\n\n See the WebDataset documentation for more information about accepted data and input formats.\n\n If using multiple processes the number of shards should be divisible by the number of workers to ensure an\n even split among workers. If it is not divisible, logging will give a warning but training will proceed.\n In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering\n is applied. We currently do not check for this, but your program may hang if the shards are uneven!\n\n Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been\n replaced by shuffle_n (int).\n\n Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest\n after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.\n\n Args:\n audio_tar_filepaths: Either a list of audio tarball filepaths, or a\n string (can be brace-expandable).\n manifest_filepath (str): Path to the manifest.\n tokenizer (TokenizerSpec): Either a Word Piece Encoding tokenizer (BERT),\n or a Sentence Piece Encoding tokenizer (BPE). The CTC blank\n symbol is automatically added later for models using ctc.\n sample_rate (int): Sample rate to resample loaded audio to\n int_values (bool): If true, load samples as 32-bit integers. 
Defauts to False.\n augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor\n object used to augment loaded audio\n shuffle_n (int): How many samples to look ahead and load to be shuffled.\n See WebDataset documentation for more details.\n Defaults to 0.\n min_duration (float): Dataset parameter.\n All training files which have a duration less than min_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to 0.1.\n max_duration (float): Dataset parameter.\n All training files which have a duration more than max_duration\n are dropped. Note: Duration is read from the manifest JSON.\n Defaults to None.\n max_utts (int): Limit number of utterances. 0 means no maximum.\n trim (bool): Whether to use trim silence from beginning and end\n of audio signal using librosa.effects.trim().\n Defaults to False.\n pad_id (id): Token used to pad when collating samples in batches.\n If this is None, pads using 0s.\n Defaults to None.\n global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.\n world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.\n \"\"\"\n\n def __init__(\n self,\n audio_tar_filepaths: Union[str, List[str]],\n manifest_filepath: str,\n tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',\n sample_rate: int,\n int_values: bool = False,\n augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,\n shuffle_n: int = 0,\n min_duration: Optional[float] = None,\n max_duration: Optional[float] = None,\n max_utts: int = 0,\n trim: bool = False,\n add_misc: bool = False,\n global_rank: int = 0,\n world_size: int = 0,\n use_start_end_token: bool = True,\n ):\n if use_start_end_token and hasattr(tokenizer, 'bos_token'):\n bos_id = tokenizer.bos_id\n else:\n bos_id = None\n\n if use_start_end_token and hasattr(tokenizer, 'eos_token'):\n eos_id = tokenizer.eos_id\n else:\n eos_id = None\n\n if hasattr(tokenizer, 'pad_token'):\n pad_id = tokenizer.pad_id\n else:\n pad_id = 0\n\n class TokenizerWrapper:\n def __init__(self, tokenizer):\n self._tokenizer = tokenizer\n\n def __call__(self, text):\n t = self._tokenizer.text_to_ids(text)\n return t\n\n super().__init__(\n audio_tar_filepaths=audio_tar_filepaths,\n manifest_filepath=manifest_filepath,\n parser=TokenizerWrapper(tokenizer),\n sample_rate=sample_rate,\n int_values=int_values,\n augmentor=augmentor,\n shuffle_n=shuffle_n,\n min_duration=min_duration,\n max_duration=max_duration,\n max_utts=max_utts,\n trim=trim,\n bos_id=bos_id,\n eos_id=eos_id,\n add_misc=add_misc,\n pad_id=pad_id,\n global_rank=global_rank,\n world_size=world_size,\n )\n"
] | [
[
"torch.stack",
"torch.cat"
],
[
"torch.stack",
"torch.load",
"torch.repeat_interleave",
"torch.nn.functional.pad",
"torch.tensor"
]
] |
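The NeMo row above turns on one recurring idiom that its apis column lists (torch.nn.functional.pad followed by torch.stack): every variable-length signal is right-padded to the batch maximum before stacking, as in _speech_collate_fn. A minimal self-contained sketch of just that pattern, with illustrative tensor sizes that are not taken from the dataset:

    import torch
    import torch.nn.functional as F

    # Three variable-length 1-D "signals" of lengths 3, 5, and 2 (illustrative).
    signals = [torch.randn(3), torch.randn(5), torch.randn(2)]
    lengths = torch.tensor([s.shape[0] for s in signals])

    # Right-pad every signal to the longest length, then stack into one batch.
    max_len = int(lengths.max())
    padded = [F.pad(s, (0, max_len - s.shape[0])) for s in signals]
    batch = torch.stack(padded)  # shape: (3, 5)

    print(batch.shape, lengths)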
GBATZOLIS/CAFLOW | [
"ea33f84c424bd8e46999be59cd5d52bd8f0a3a77"
] | [
"caflow/models/modules/mri_to_pet/UnconditionalFlow.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 19 22:02:53 2021\n\n@author: gbatz97\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nfrom caflow.models.modules.blocks.FlowBlock import FlowBlock \nfrom caflow.models.modules.blocks.Dequantisation import Dequantisation, VariationalDequantization\n\nclass UnconditionalFlow(nn.Module):\n def __init__(self, channels, dim, resolution, scales, scale_depth, quants, vardeq_depth, coupling_type, nn_settings):\n super(UnconditionalFlow, self).__init__()\n \n self.channels = channels\n self.dim = dim\n self.resolution = resolution\n self.scales = scales\n \n self.scale_blocks = nn.ModuleList()\n\n if vardeq_depth is None:\n self.scale_blocks.append(Dequantisation(dim=dim, quants=quants))\n else:\n self.scale_blocks.append(VariationalDequantization(channels=channels, depth=vardeq_depth, dim=dim, \\\n resolution=self.calculate_resolution(dim, 0),\\\n quants=quants, coupling_type=coupling_type, nn_settings=nn_settings))\n\n for scale in range(self.scales):\n scale_channels = self.calculate_scale_channels(dim, scale)\n resolution = self.calculate_resolution(dim, scale)\n self.scale_blocks.append(FlowBlock(channels = scale_channels, dim = dim,\n resolution=resolution, depth = scale_depth,\n coupling_type=coupling_type, nn_settings=nn_settings))\n\n # Create prior distribution for final latent space\n self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)\n \n def calculate_resolution(self, dim, scale):\n if isinstance(self.resolution, int):\n resolution = tuple([self.resolution//2**scale for _ in range(self.dim)])\n else:\n resolution = tuple([x//2**scale for x in self.resolution])\n return resolution\n\n def calculate_scale_channels(self, dim, scale):\n if scale==0:\n return 2 ** (dim * scale) * self.channels\n else:\n return 2 ** ((dim-1) * scale) * self.channels\n \n \n def forward(self, y=None, z=[], logprior=0., logdet=0., reverse=False):\n if reverse:\n #assert z is not None\n y_dec, logdet = self.decode(z, logdet=logdet)\n return y_dec, logdet\n else:\n #assert y is not None\n z_enc, logprior, logdet = self.encode(y, logprior=logprior, logdet=logdet)\n return z_enc, logprior, logdet\n \n def encode(self, y, logprior, logdet):\n #y is the HR image/scan that we want to encode in the latent space\n #z_enc: list of the encoded latent tensors in ascending scale order\n #order: from scale 1 (dim=orig_dim/2) --- to --- scale n (dim=orig_dim/2^n)\n\n h_pass = y\n z_enc = []\n \n h_pass, logdet = self.scale_blocks[0](h_pass, logdet, False) #dequantisation\n for i in range(1, self.scales+1):\n if i==self.scales:\n h_split, logdet = self.scale_blocks[i](h_pass, logdet, False)\n else:\n h, logdet = self.scale_blocks[i](h_pass, logdet, False)\n h_split, h_pass = h.chunk(2, dim=1)\n \n logprior+=self.prior.log_prob(h_split).sum(dim = [i+1 for i in range(self.dim+1)])\n z_enc.append(h_split)\n\n return z_enc, logprior, logdet\n\n\n def decode(self, z:list, logdet):\n #z is a list of the latent tensors of the different scales.\n #The tensors of different scales have been put in an ascending order\n #z = [h_split(1st scale)-size:D/2, ..., h_split(nth scale)-size:D/2^n]\n \n h_pass=None\n for i in range(self.scales):\n h_split = z[self.scales-1-i]\n if h_pass==None:\n concat_pass = h_split\n else:\n concat_pass = torch.cat([h_split, h_pass], dim=1)\n h_pass, logdet = self.scale_blocks[self.scales-i](concat_pass, logdet, True)\n \n h_pass, logdet = self.scale_blocks[0](h_pass, logdet, True) #quantisation\n \n return h_pass, 
logdet\n\n\n\"\"\"\n#instantiate the unconditional flow\nrflow = UnconditionalFlow(channels=1, dim=3, scales=4, scale_depth=3, network = GatedConvNet)\n\ny = torch.randn((2, 1, 64, 64, 64), dtype=torch.float32)\nprint('y shape: ', y.size())\n\nprint('Encoding y with the forward pass...We get z_enc (same dimensionality)')\nz_enc, logprior, logdet = rflow(y=y)\n\nprint('z_enc elements:')\nfor i, elem in enumerate(z_enc):\n print(i, elem.size())\n \nprint('logprior size: ', logprior.size())\nprint('logdet size: ', logdet.size())\n\nprint('Decoding y_dec from its z_enc encoding... We pass z_enc through the backward pass.')\ny_dec, logdet = rflow(z=z_enc, reverse=True)\nprint('y_dec size:', y_dec.size())\n\nr = torch.abs(y-y_dec)\nprint('sum(|y-y_dec|)',torch.sum(r))\nprint('mean(|y-y_dec|):',torch.mean(r))\n\"\"\"\n \n\n"
] | [
[
"torch.nn.ModuleList",
"torch.distributions.normal.Normal",
"torch.cat"
]
] |
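The encode/decode pair in the UnconditionalFlow row above depends on a channel-wise chunk on the way in and torch.cat on the way out being exact inverses (h.chunk(2, dim=1) during encoding, torch.cat([h_split, h_pass], dim=1) during decoding). A minimal sketch of that lossless split/merge round trip; the shapes are illustrative only:

    import torch

    h = torch.randn(2, 8, 16, 16)            # (batch, channels, H, W)
    h_split, h_pass = h.chunk(2, dim=1)       # two (2, 4, 16, 16) halves
    restored = torch.cat([h_split, h_pass], dim=1)
    assert torch.equal(restored, h)           # split followed by concat is lossless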
tech-srl/c3po | [
"a673a0514ee8c800efa12574ef8da3fcb8ef73b7"
] | [
"LaserTagger/Models/TransformerCRF_V2.py"
] | [
"from torch import nn\nfrom Models.CRF import CRF\nfrom Models.Transformer import Transformer\nfrom Models.TransformerCtx import TransformerCtx\nfrom Models.SequenceEncoder import SequenceEncoder\nfrom Models.Attention import Attention\nimport torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\nclass Transformer_CRF(nn.Module):\n def __init__(self, vocab_size, ctx_vocab_size, nb_labels, emb_dim, hidden_dim, bos_idx, eos_idx, pad_idx, num_lstm_layers, dropout, device):\n super().__init__()\n self.transformer = Transformer(\n vocab_size, in_dim=emb_dim, nb_labels=nb_labels, dropout=dropout\n )\n self.crf = CRF(\n nb_labels,\n device,\n bos_idx,\n eos_idx,\n pad_tag_id=pad_idx,\n batch_first=True,\n )\n self.ctx_encoder = TransformerCtx(ctx_vocab_size, device=device, in_dim=emb_dim)\n self.ctx_combiner = Attention(emb_dim)\n self.query = nn.Parameter(torch.Tensor(1, emb_dim))\n torch.nn.init.xavier_uniform_(self.query.data)\n self.emb_dim = emb_dim\n self.ctx_linear = nn.Linear(2 * emb_dim, emb_dim)\n\n def combine_ctx(self, x, before_ctx, after_ctx):\n # (batch, h_dim)\n before_ctx_encoded = self.before_ctx_encoder(before_ctx)\n after_ctx_encoded = self.after_ctx_encoder(after_ctx)\n\n # (batch, 2 * h_dim)\n ctx_cat = torch.cat((before_ctx_encoded, after_ctx_encoded), dim=1)\n # (batch, h_dim)\n encoded_ctx = torch.tanh(self.ctx_linear(ctx_cat))\n\n seq_len = x.shape[1]\n\n # (batch, seq_len, h_dim)\n encoded_ctx_repeated = encoded_ctx.unsqueeze(dim=0).repeat(seq_len, 1, 1)\n return encoded_ctx_repeated\n\n def forward_ctx(self, x, before_ctx, after_ctx):\n batch_size = x.shape[0]\n # (batch_size, 1, emb_dim)\n query = self.query.expand(batch_size, self.emb_dim).unsqueeze(dim=1)\n packed_query = pack_padded_sequence(query, batch_size * [1], batch_first=True, enforce_sorted=False)\n # Packed sequence (before_ctx_length, batch_size, emb_dim)\n encoded_before_ctx = self.ctx_encoder(before_ctx)\n # (batch_size, 1, emb_dim)\n encoded_before_ctx, _ = self.ctx_combiner(packed_query, encoded_before_ctx)\n # Packed sequence (after_ctx_length, batch_size, emb_dim)\n encoded_after_ctx = self.ctx_encoder(after_ctx)\n # (batch_size, 1 ,emb_dim)\n encoded_after_ctx, _ = self.ctx_combiner(packed_query, encoded_after_ctx)\n # (batch_size ,emb_dim)\n combined_ctx = self.ctx_linear(torch.cat([encoded_before_ctx, encoded_after_ctx], dim=2).squeeze())\n # (1, batch_size ,emb_dim)\n combined_ctx = combined_ctx.unsqueeze(dim=0)\n seq_len = x.shape[1]\n # (seq_len, batch_size, emb_dim)\n combined_ctx = combined_ctx.repeat(seq_len, 1, 1)\n return combined_ctx\n\n def forward(self, x, before_ctx, after_ctx, mask=None):\n # (seq_len, batch_size, emb_dim)\n combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)\n # (batch_size, src_length, num_labels)\n emissions = self.transformer(x, combined_ctx, mask)\n score, path = self.crf.decode(emissions, mask=mask)\n return score, path\n\n def loss(self, x, before_ctx, after_ctx, y, mask=None):\n # (seq_len, batch_size, emb_dim)\n combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)\n # (batch_size, src_length, num_labels)\n emissions = self.transformer(x, combined_ctx, mask)\n nll = self.crf(emissions, y, mask=mask)\n return nll\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.Linear",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.cat",
"torch.Tensor"
]
] |
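In the Transformer_CRF row above, forward_ctx expands a single learnable query vector across the batch before attending over each context (the combine_ctx method is unused by forward). A stripped-down sketch of just that query-parameter pattern, with illustrative dimensions; the repo's custom Attention and CRF modules are not reproduced here:

    import torch
    from torch import nn

    emb_dim, batch_size = 16, 4
    query = nn.Parameter(torch.Tensor(1, emb_dim))
    nn.init.xavier_uniform_(query.data)

    # One shared query, repeated for every element of the batch:
    # (1, emb_dim) -> (batch, emb_dim) -> (batch, 1, emb_dim)
    batched_query = query.expand(batch_size, emb_dim).unsqueeze(dim=1)
    print(batched_query.shape)  # torch.Size([4, 1, 16])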
kevinintel/neural-compressor | [
"b57645566aeff8d3c18dc49d2739a583c072f940"
] | [
"examples/pytorch/eager/image_recognition/cifar100/main.py"
] | [
"import os\nimport time\nimport shutil\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models.vgg as vgg\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom plain_cnn_cifar import ConvNetMaker, plane_cifar100_book\n\n# used for logging to TensorBoard\nfrom tensorboard_logger import configure, log_value\n\nparser = argparse.ArgumentParser(description='PyTorch CNN or VGG Training')\nparser.add_argument('--dataset', default='cifar100', type=str,\n help='dataset cifar100')\nparser.add_argument('--epochs', default=200, type=int,\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int,\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n help='mini-batch size (default: 128)')\nparser.add_argument('--lr', '--learning-rate', default=0.02, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, help='momentum')\nparser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')\nparser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,\n help='weight decay (default: 5e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n help='print frequency (default: 10)')\nparser.add_argument('--droprate', default=0, type=float,\n help='dropout probability (default: 0.0)')\nparser.add_argument('--no-augment', dest='augment', action='store_false',\n help='whether to use standard augmentation (default: True)')\nparser.add_argument('--resume', default='', type=str,\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--name', default='CNN-2', type=str,\n help='name of experiment')\nparser.add_argument('--student_type', default='CNN-2', type=str,\n help='type of student model (CNN-2 [default] or VGG-8)')\nparser.add_argument('--teacher_type', default='CNN-10', type=str,\n help='type of teacher model (CNN-10 [default] or VGG-13)')\nparser.add_argument('--teacher_model', default='runs/CNN-10/model_best.pth.tar', type=str,\n help='path of teacher model')\nparser.add_argument('--tensorboard',\n help='Log progress to TensorBoard', action='store_true')\n\nparser.add_argument(\"--seed\", type=int, default=5143, help=\"A seed for reproducible training.\")\nparser.add_argument(\"--config\", default='distillation.yaml', help=\"pruning config\")\nparser.add_argument(\"--temperature\", default=1, type=float,\n help='temperature parameter of distillation')\nparser.add_argument(\"--loss_types\", default=['CE', 'KL'], type=str, nargs='+',\n help='loss types of distillation, should be a list of length 2, '\n 'first for student targets loss, second for teacher student loss.')\nparser.add_argument(\"--loss_weights\", default=[0.5, 0.5], type=float, nargs='+',\n help='loss weights of distillation, should be a list of length 2, '\n 'and sum to 1.0, first for student targets loss weight, '\n 'second for teacher student loss weight.')\nparser.set_defaults(augment=True)\n\ndef set_seed(seed):\n import random\n import numpy as np\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef main():\n global args, best_prec1\n args, _ = parser.parse_known_args()\n best_prec1 = 0\n if args.seed is not None:\n set_seed(args.seed)\n if args.tensorboard: configure(\"runs/%s\"%(args.name))\n\n # Data loading code\n normalize = 
transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])\n\n if args.augment:\n transform_train = transforms.Compose([\n \ttransforms.ToTensor(),\n \ttransforms.Lambda(lambda x: F.pad(x.unsqueeze(0),\n \t\t\t\t\t\t(4,4,4,4),mode='reflect').squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n else:\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n # create teacher and student model\n if args.teacher_type == 'CNN-10':\n teacher_model = ConvNetMaker(plane_cifar100_book['10'])\n elif args.teacher_type == 'VGG-13':\n teacher_model = vgg.vgg13(num_classes=100)\n else:\n raise NotImplementedError('Unsupported teacher model type')\n teacher_model.load_state_dict(torch.load(args.teacher_model)['state_dict'])\n \n if args.student_type == 'CNN-2':\n student_model = ConvNetMaker(plane_cifar100_book['2'])\n elif args.student_type == 'VGG-8':\n student_model = vgg.VGG(vgg.make_layers([64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M']), num_classes=100)\n else:\n raise NotImplementedError('Unsupported student model type')\n\n # get the number of model parameters\n print('Number of teacher model parameters: {}'.format(\n sum([p.data.nelement() for p in teacher_model.parameters()])))\n print('Number of student model parameters: {}'.format(\n sum([p.data.nelement() for p in student_model.parameters()])))\n\n kwargs = {'num_workers': 0, 'pin_memory': True}\n assert(args.dataset == 'cifar100')\n train_dataset = datasets.__dict__[args.dataset.upper()]('../data', \n train=True, download=True,\n transform=transform_train)\n # get logits of teacher model\n if args.loss_weights[1] > 0:\n from tqdm import tqdm\n def get_logits(teacher_model, train_dataset):\n print(\"***** Getting logits of teacher model *****\")\n print(f\" Num examples = {len(train_dataset) }\")\n logits_file = os.path.join(os.path.dirname(args.teacher_model), 'teacher_logits.npy')\n if not os.path.exists(logits_file):\n teacher_model.eval()\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)\n train_dataloader = tqdm(train_dataloader, desc=\"Evaluating\")\n teacher_logits = []\n for step, (input, target) in enumerate(train_dataloader):\n outputs = teacher_model(input)\n teacher_logits += [x for x in outputs.numpy()]\n np.save(logits_file, np.array(teacher_logits))\n else:\n teacher_logits = np.load(logits_file)\n train_dataset.targets = [{'labels':l, 'teacher_logits':tl} \\\n for l, tl in zip(train_dataset.targets, teacher_logits)]\n return train_dataset\n with torch.no_grad():\n train_dataset = get_logits(teacher_model, train_dataset)\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(\n datasets.__dict__[args.dataset.upper()]('../data', train=False, transform=transform_test),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n student_model.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n 
else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n # define optimizer\n optimizer = torch.optim.SGD(student_model.parameters(), args.lr,\n momentum=args.momentum, nesterov = args.nesterov,\n weight_decay=args.weight_decay)\n\n # cosine learning rate\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader)*args.epochs)\n\n def train_func(model):\n return train(train_loader, model, scheduler, distiller, best_prec1)\n\n def eval_func(model):\n return validate(val_loader, model, distiller)\n\n from neural_compressor.experimental import Distillation, common\n from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss\n \n distiller = Distillation(args.config)\n distiller.teacher_model = common.Model(teacher_model)\n distiller.student_model = common.Model(student_model)\n distiller.train_func = train_func\n distiller.eval_func = eval_func\n distiller.optimizer = optimizer\n distiller.criterion = PyTorchKnowledgeDistillationLoss(\n temperature=args.temperature,\n loss_types=args.loss_types,\n loss_weights=args.loss_weights)\n model = distiller()\n \n directory = \"runs/%s/\"%(args.name)\n os.makedirs(directory, exist_ok=True)\n model.save(directory)\n # change to framework model for further use\n model = model.model\n\ndef train(train_loader, model, scheduler, distiller, best_prec1):\n distiller.pre_epoch_begin()\n for epoch in range(args.start_epoch, args.epochs):\n \"\"\"Train for one epoch on the training set\"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n teacher_logits = None\n if isinstance(target, dict):\n teacher_logits = target['teacher_logits']\n target = target['labels']\n\n # compute output\n output = model(input)\n \n distiller.on_post_forward(input, teacher_logits)\n loss = distiller.criterion(output, target)\n\n # measure accuracy and record loss\n prec1 = accuracy(output.data, target, topk=(1,))[0]\n losses.update(loss.data.item(), input.size(0))\n top1.update(prec1.item(), input.size(0))\n\n # compute gradient and do SGD step\n distiller.optimizer.zero_grad()\n loss.backward()\n distiller.optimizer.step()\n scheduler.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'LR {scheduler._last_lr[0]:.6f}'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n loss=losses, top1=top1, scheduler=scheduler))\n\n distiller.on_epoch_end()\n # remember best prec@1 and save checkpoint\n is_best = distiller.best_score > best_prec1\n best_prec1 = max(distiller.best_score, best_prec1)\n save_checkpoint({\n 'epoch': distiller._epoch_runned + 1,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n }, is_best)\n # log to TensorBoard\n if args.tensorboard:\n log_value('train_loss', losses.avg, epoch)\n log_value('train_acc', top1.avg, epoch)\n log_value('learning_rate', scheduler._last_lr[0], epoch)\n \n\ndef validate(val_loader, model, distiller):\n \"\"\"Perform validation on the validation set\"\"\"\n batch_time = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n # compute 
output\n with torch.no_grad():\n output = model(input)\n\n # measure accuracy\n prec1 = accuracy(output.data, target, topk=(1,))[0]\n top1.update(prec1.item(), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time,\n top1=top1))\n\n print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))\n # log to TensorBoard\n if args.tensorboard:\n log_value('val_acc', top1.avg, distiller._epoch_runned)\n return top1.avg\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n \"\"\"Saves checkpoint to disk\"\"\"\n directory = \"runs/%s/\"%(args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.utils.data.DataLoader",
"numpy.load",
"torch.cuda.manual_seed_all",
"torch.load",
"torch.manual_seed",
"torch.save",
"numpy.random.seed",
"torch.no_grad",
"numpy.array"
]
] |
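The distillation script in the row above caches the teacher's logits on disk (torch.no_grad plus numpy.save/numpy.load from its apis column) so that repeated runs skip the teacher forward passes. A minimal sketch of that cache-or-compute pattern, assuming any eval-mode model and an (inputs, targets) dataloader; all names here are placeholders, not the script's own:

    import os
    import numpy as np
    import torch

    def get_or_compute_logits(model, dataloader, cache_file="teacher_logits.npy"):
        # Reuse cached logits if a previous run already produced them.
        if os.path.exists(cache_file):
            return np.load(cache_file)
        model.eval()
        logits = []
        with torch.no_grad():  # inference only; no autograd graph needed
            for inputs, _ in dataloader:
                logits.extend(model(inputs).cpu().numpy())
        logits = np.array(logits)
        np.save(cache_file, logits)
        return logits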
ColinRTaylor/evalml | [
"5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d"
] | [
"evalml/tests/integration_tests/test_data_checks_and_actions_integration.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\nimport woodwork as ww\nfrom pandas.testing import assert_frame_equal, assert_series_equal\n\nfrom evalml.automl import get_default_primary_search_objective\nfrom evalml.data_checks import DefaultDataChecks, OutliersDataCheck\nfrom evalml.data_checks.invalid_target_data_check import InvalidTargetDataCheck\nfrom evalml.data_checks.null_data_check import NullDataCheck\nfrom evalml.pipelines import BinaryClassificationPipeline\nfrom evalml.pipelines.components import (\n DropColumns,\n DropRowsTransformer,\n TargetImputer,\n)\nfrom evalml.pipelines.components.transformers.imputers.per_column_imputer import (\n PerColumnImputer,\n)\nfrom evalml.pipelines.multiclass_classification_pipeline import (\n MulticlassClassificationPipeline,\n)\nfrom evalml.pipelines.regression_pipeline import RegressionPipeline\nfrom evalml.pipelines.utils import make_pipeline_from_data_check_output\n\n\ndef test_data_checks_with_healthy_data(X_y_binary):\n # Checks do not return any error.\n X, y = X_y_binary\n data_check = DefaultDataChecks(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n data_checks_output = data_check.validate(X, y)\n\n assert make_pipeline_from_data_check_output(\n \"binary\", data_checks_output\n ) == BinaryClassificationPipeline(component_graph={}, parameters={}, random_seed=0)\n\n\ndef test_data_checks_suggests_drop_and_impute_cols():\n X = pd.DataFrame(\n {\n \"null_with_categorical\": [\"a\", None, \"b\", \"c\", \"c\"],\n \"lots_of_null\": [None, 7, None, 3, 5],\n \"all_null\": [None, None, None, None, None],\n \"no_null\": [1, 2, 3, 4, 5],\n }\n )\n X.ww.init(logical_types={\"null_with_categorical\": \"categorical\"})\n y = pd.Series([1, 0, 0, 1, 1])\n data_check = NullDataCheck()\n data_checks_output = data_check.validate(X, y)\n\n action_pipeline = make_pipeline_from_data_check_output(\"binary\", data_checks_output)\n assert action_pipeline == BinaryClassificationPipeline(\n component_graph={\n \"Per Column Imputer\": [PerColumnImputer, \"X\", \"y\"],\n \"Drop Columns Transformer\": [\n DropColumns,\n \"Per Column Imputer.x\",\n \"y\",\n ],\n },\n parameters={\n \"Per Column Imputer\": {\n \"impute_strategies\": {\n \"null_with_categorical\": {\"impute_strategy\": \"most_frequent\"},\n \"lots_of_null\": {\"impute_strategy\": \"mean\"},\n },\n \"default_impute_strategy\": \"most_frequent\",\n },\n \"Drop Columns Transformer\": {\"columns\": [\"all_null\"]},\n },\n random_seed=0,\n )\n X_expected = pd.DataFrame(\n {\n \"null_with_categorical\": [\"a\", \"c\", \"b\", \"c\", \"c\"],\n \"lots_of_null\": [5, 7, 5, 3, 5],\n \"no_null\": [1, 2, 3, 4, 5],\n }\n )\n X_expected.ww.init(\n logical_types={\"lots_of_null\": \"double\", \"null_with_categorical\": \"categorical\"}\n )\n action_pipeline.fit(X, y)\n X_t = action_pipeline.transform(X, y)\n assert_frame_equal(X_expected, X_t)\n\n\[email protected](\"problem_type\", [\"binary\", \"multiclass\", \"regression\"])\ndef test_data_checks_impute_cols(problem_type):\n X = pd.DataFrame()\n if problem_type == \"binary\":\n y = ww.init_series(pd.Series([0, 1, 1, None, None]))\n objective = \"Log Loss Binary\"\n expected_pipeline_class = BinaryClassificationPipeline\n y_expected = ww.init_series(pd.Series([0, 1, 1, 1, 1]), logical_type=\"double\")\n\n elif problem_type == \"multiclass\":\n y = ww.init_series(pd.Series([0, 1, 2, 2, None]))\n objective = \"Log Loss Multiclass\"\n expected_pipeline_class = MulticlassClassificationPipeline\n y_expected = 
ww.init_series(pd.Series([0, 1, 2, 2, 2]), logical_type=\"double\")\n\n else:\n y = ww.init_series(pd.Series([0, 0.1, 0.2, None, None]))\n objective = \"R2\"\n expected_pipeline_class = RegressionPipeline\n y_expected = ww.init_series(\n pd.Series([0, 0.1, 0.2, 0.1, 0.1]), logical_type=\"double\"\n )\n data_check = InvalidTargetDataCheck(problem_type, objective)\n data_checks_output = data_check.validate(None, y)\n\n action_pipeline = make_pipeline_from_data_check_output(\n problem_type, data_checks_output\n )\n expected_parameters = (\n {\"Target Imputer\": {\"impute_strategy\": \"mean\", \"fill_value\": None}}\n if problem_type == \"regression\"\n else {\n \"Target Imputer\": {\"impute_strategy\": \"most_frequent\", \"fill_value\": None}\n }\n )\n assert action_pipeline == expected_pipeline_class(\n component_graph={\"Target Imputer\": [TargetImputer, \"X\", \"y\"]},\n parameters=expected_parameters,\n random_seed=0,\n )\n\n action_pipeline.fit(X, y)\n _, y_t = action_pipeline.transform(X, y)\n assert_series_equal(y_expected, y_t)\n\n\ndef test_data_checks_suggests_drop_rows():\n a = np.arange(10) * 0.01\n data = np.tile(a, (100, 10))\n\n X = pd.DataFrame(data=data)\n X.iloc[0, 3] = 1000\n X.iloc[3, 25] = 1000\n X.iloc[5, 55] = 10000\n X.iloc[10, 72] = -1000\n X.iloc[:, 90] = \"string_values\"\n y = pd.Series(np.tile([0, 1], 50))\n\n outliers_check = OutliersDataCheck()\n data_checks_output = outliers_check.validate(X)\n\n action_pipeline = make_pipeline_from_data_check_output(\"binary\", data_checks_output)\n assert action_pipeline == BinaryClassificationPipeline(\n component_graph={\"Drop Rows Transformer\": [DropRowsTransformer, \"X\", \"y\"]},\n parameters={\"Drop Rows Transformer\": {\"indices_to_drop\": [0, 3, 5, 10]}},\n random_seed=0,\n )\n\n X_expected = X.drop([0, 3, 5, 10])\n X_expected.ww.init()\n y_expected = y.drop([0, 3, 5, 10])\n\n action_pipeline.fit(X, y)\n X_t, y_t = action_pipeline.transform(X, y)\n assert_frame_equal(X_expected, X_t)\n assert_series_equal(y_expected, y_t)\n"
] | [
[
"numpy.tile",
"pandas.Series",
"pandas.DataFrame",
"numpy.arange",
"pandas.testing.assert_series_equal",
"pandas.testing.assert_frame_equal"
]
] |
markvilar/Cardinal | [
"a3d87d34ed253a7a4400ed056c5d59c20f15973b"
] | [
"Python/filter_dvl.py"
] | [
"import argparse\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nplt.style.use(\"./Styles/Scientific.mplstyle\")\n\nfrom typing import Dict, List\n\nimport data\nimport filters\nimport utilities\nimport utm\n\ndef filter_dvl(data_config: data.DataConfiguration, \\\n filter_config: filters.FilterConfiguration):\n \"\"\"\n \"\"\"\n # Read data.\n data = pd.read_csv(data_config.input)\n\n # Extract relevant data for filtering.\n time = data[\"Epoch\"].to_numpy()\n altitude = data[\"Altitude\"].to_numpy()\n\n # Calculate sampling frequency.\n filter_config.sample_frequency = 1 / np.mean(time[1:] - time[0:-1])\n\n # Add end values.\n filtered_altitude = filters.add_appendage(altitude, filter_config)\n\n # Filter data and account for time delay.\n filtered_altitude, filter_delay = filters.FIR_filter(filtered_altitude, \\\n filter_config, axis=1)\n\n filtered_time = time - filter_delay\n\n print(\"\\nDVL:\")\n print(\" - Sampling time: {0:.4f}\".format( \\\n 1 / filter_config.sample_frequency))\n print(\" - Sampling frequency: {0:.4f}\".format( \\\n filter_config.sample_frequency))\n print(\" - Filter time delay: {0:.4f}\".format(filter_delay))\n\n # Remove end values.\n filtered_altitude = filters.remove_appendage(filtered_altitude, \\\n filter_config)\n\n filtered_data = pd.DataFrame()\n filtered_data[\"Epoch\"] = filtered_time\n filtered_data[\"Altitude\"] = filtered_altitude\n\n # Datetime calculations.\n times = []\n for epoch in filtered_data[\"Epoch\"]:\n time = datetime.datetime.fromtimestamp(epoch).strftime( \\\n data_config.datetime_format)\n times.append(time)\n\n filtered_data[\"Datetime\"] = np.array(times, dtype=str)\n\n # Save data.\n if data_config.save_output:\n filtered_data = pd.DataFrame(filtered_data)\n filtered_data.to_csv(data_config.output + \"ROV-DVL.csv\", sep=',')\n\ndef main():\n # Parse arguments.\n parser = argparse.ArgumentParser( \\\n description=\"Filter DVL data with a FIR lowpass filter.\")\n parser.add_argument(\"input\", type=str, help=\"Input file path.\")\n parser.add_argument(\"output\", type=str, help=\"Output directory path.\") \n parser.add_argument(\"order\", type=int, help=\"Filter order.\")\n parser.add_argument(\"cutoff\", type=float, help=\"Filter cutoff.\")\n parser.add_argument(\"appendage\", type=int, help=\"Filter appendage.\")\n parser.add_argument('--show_figures', type=bool, default=False, \\\n help= \"Show figures.\", action=argparse.BooleanOptionalAction)\n parser.add_argument('--save_figures', type=bool, default=False, \\\n help= \"Save figures.\", action=argparse.BooleanOptionalAction)\n parser.add_argument('--save_output', type=bool, default=False, \\\n help= \"Save output.\", action=argparse.BooleanOptionalAction)\n args = parser.parse_args()\n\n # Data configuration.\n data_config = data.DataConfiguration(args.input, args.output, \\\n args.show_figures, args.save_figures, args.save_output)\n\n # Filter configuration.\n filter_config = filters.FilterConfiguration(args.order, args.cutoff, \\\n args.appendage)\n\n # Filter data.\n filter_dvl(data_config, filter_config)\n \nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.style.use",
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.use",
"numpy.array",
"numpy.mean"
]
] |
natalia-rubio/py_grama | [
"968c1c0238d7165de3b1b96534791feacc4aa960"
] | [
"docs/scripts/ex_sinews.py"
] | [
"import grama as gr\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom grama.models import make_cantilever_beam\nmd_beam = make_cantilever_beam()\n\nmd_beam >> \\\n gr.ev_sinews(n_density=50, n_sweeps=10, df_det=\"nom\", skip=True) >> \\\n gr.pt_auto()\nplt.savefig(\"../images/ex_beam_sinews_doe.png\")\n\nmd_beam >> \\\n gr.ev_sinews(n_density=50, n_sweeps=10, df_det=\"nom\", skip=False) >> \\\n gr.pt_auto()\nplt.savefig(\"../images/ex_beam_sinews_res.png\")\n"
] | [
[
"matplotlib.pyplot.savefig"
]
] |
ahmed-f-alrefaie/forecaster | [
"25b73a533f6195f3e5c703730e63cb3e242c649a"
] | [
"forecaster/func.py"
] | [
"import numpy as np\nfrom scipy.stats import norm, truncnorm\nfrom numpy.random import default_rng\n\n\n### fix the number of different populations\nn_pop = 4\n\ndef pick_random_hyper(all_hyper, sample_size=None):\n\trng = default_rng()\n\tsize = sample_size or all_hyper.shape[0]\n\treturn rng.choice(all_hyper, size=sample_size, replace=False)\n\n\n\n\n\ndef indicate(M, trans, i):\n\t'''\n\tindicate which M belongs to population i given transition parameter\n\t'''\n\tts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)\n\treturn (M>=ts[i]) & (M<ts[i+1])\n\ndef indicate_II(M, trans, i):\n\n\treturn (M>=trans[...,i]) & (M<trans[...,i+1])\n\n\n\ndef split_hyper_linear(hyper):\n\t'''\n\tsplit hyper and derive c\n\t'''\n\tc0, slope,sigma, trans = \\\n\thyper[0], hyper[1:1+n_pop], hyper[1+n_pop:1+2*n_pop], hyper[1+2*n_pop:]\n\n\tc = np.zeros_like(slope)\n\tc[0] = c0\n\tfor i in range(1,n_pop):\n\t\tc[i] = c[i-1] + trans[i-1]*(slope[i-1]-slope[i])\n\n\treturn c, slope, sigma, trans\n\n\ndef split_hyper_linear_II(hyper):\n\t'''\n\tsplit hyper and derive c\n\t'''\n\tc0, slope,sigma, trans = \\\n\thyper[...,0], hyper[...,1:1+n_pop], hyper[...,1+n_pop:1+2*n_pop], hyper[...,1+2*n_pop:]\n\n\tc = np.zeros_like(slope)\n\tc[...,0] = c0\n\tfor i in range(1,n_pop):\n\t\tc[...,i] = c[...,i-1] + trans[...,i-1]*(slope[...,i-1]-slope[...,i])\n\ttrans = np.insert(np.insert(trans,n_pop-1,np.inf,axis=1), 0, -np.inf, axis=1)\n\treturn c, slope, sigma, trans\n\n\ndef piece_linear_II(hyper, M, prob_R):\n\tc, slope, sigma, trans = split_hyper_linear_II(hyper)\n\n\tM = M\n\n\tR = np.zeros_like(M)\n\n\tfor i in range(n_pop):\n\t\tind = indicate_II(M, trans, i)\n\t\tmu = c[...,i]\n\t\tmu[ind] += M[ind]*slope[ind,i]\n\t\tR[ind] = norm.ppf(prob_R[ind],mu[ind],sigma[ind,i])\n\n\treturn R\n\ndef generate_mass(mean, std, sample_size):\n\tmlower = 3e-4\n\tmupper = 3e5\n\treturn truncnorm.rvs( (mlower-mean)/std, (mupper-mean)/std, loc=mean, scale=std, size=sample_size)\t\n\n\ndef piece_linear(hyper, M, prob_R):\n\t'''\n\tmodel: straight line\n\t'''\n\n\tM = np.array(M)\n\tc, slope, sigma, trans = split_hyper_linear(hyper)\n\tR = np.zeros_like(M)\n\n\tfor i in range(4):\n\t\tind = indicate(M, trans, i)\n\n\t\tmu = c[i] + M[ind]*slope[i]\n\t\tR[ind] = norm.ppf(prob_R[ind], mu, sigma[i])\n\n\treturn R\n\n\ndef ProbRGivenM(radii, M, hyper):\n\t'''\n\tp(radii|M)\n\t'''\n\tc, slope, sigma, trans = split_hyper_linear(hyper)\n\tprob = np.zeros_like(M)\n\t#print('SHAPE', prob.shape, M.shape, slope.shape)\n\tfor i in range(4):\n\t\tind = indicate(M, trans, i)\n\t\t#print('MSHAPE',M[ind].shape)\n\t\tmu = c[i] + M[ind]*slope[i]\n\t\t#print('EXPECTED',mu)\n\t\tsig = sigma[i]\n\t\tprob[ind] = norm.pdf(radii, mu, sig)\n\n\tprob = prob/np.sum(prob)\n\n\treturn prob\n\ndef ProbRGivenM_II(radii, M, hyper):\n\tc, slope, sigma, trans = split_hyper_linear_II(hyper)\n\t# 10, 100\n\tprob = np.zeros(shape=(radii.shape[0], M.shape[0]))\n\tmu = np.zeros_like(prob)\n\tfor i in range(n_pop):\n\t\tmu[...] 
= 0.0\n\t\tind = indicate_II(M[None,...], trans[:,None,:], i)\n\t\tradii_id,mass_id = np.where(ind)\n\t\t#\n\t\tmu[radii_id, mass_id] = c[radii_id,i] + slope[radii_id,i]*M[mass_id]#M[None,...]*slope[:,None,i][ind]\n\t\t#print(mu[0])\n\t\tprob[ind] = norm.pdf(radii[radii_id],mu[radii_id, mass_id],sigma[radii_id,i])\n\t#print('C',c[:,None,i])\n\treturn (prob/np.sum(prob, axis=1)[:,None])\n\ndef random_choice_2d(arr, probs):\n\tidx = (probs.cumsum(1) > np.random.rand(probs.shape[0])[:,None]).argmax(1)\n\treturn arr[idx]\n\n\n\ndef classification( logm, trans ):\n\t'''\n\tclassify as four worlds\n\t'''\n\tcount = np.zeros(4)\n\tsample_size = len(logm)\n\tts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)\n\tfor iclass in range(4):\n\t\t\n\t\tind = indicate_II( logm, ts, iclass)\n\t\tcount[iclass] = count[iclass] + ind.sum()\n\t\n\tprob = count / np.sum(count) * 100.\n\tprint ('Terran %(T).1f %%, Neptunian %(N).1f %%, Jovian %(J).1f %%, Star %(S).1f %%' \\\n\t\t\t% {'T': prob[0], 'N': prob[1], 'J': prob[2], 'S': prob[3]})\n\treturn None"
] | [
[
"numpy.zeros_like",
"numpy.sum",
"numpy.random.default_rng",
"numpy.zeros",
"scipy.stats.norm.ppf",
"scipy.stats.norm.pdf",
"numpy.insert",
"numpy.random.rand",
"numpy.array",
"numpy.where",
"scipy.stats.truncnorm.rvs"
]
] |
drkostas/SemiSeg-Contrastive | [
"af6b133400368911ef77f401b7673894fe6aa05c"
] | [
"utils/transformsgpu.py"
] | [
"'''\nCode taken from https://github.com/WilhelmT/ClassMix\nSlightly modified\n'''\n\nimport kornia\nimport torch\nimport random\nimport torch.nn as nn\n\n\ndef normalize_rgb(data, dataset):\n \"\"\"\n\n Args:\n data: data to normalize BxCxWxH\n dataset: name of the dataset to normalize\n\n Returns:\n normalized data as (x-mean)/255\n\n \"\"\"\n if dataset == 'pascal_voc':\n mean = (122.6789143, 116.66876762, 104.00698793) # rgb\n elif dataset == 'cityscapes':\n mean = (73.15835921, 82.90891754, 72.39239876) # rgb\n else:\n mean = (127.5, 127.5, 127.5 )\n\n mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()\n data_norm = ((data-mean)/255.0)\n return data_norm\n\n\ndef normalize_bgr(data, dataset):\n \"\"\"\n\n Args:\n data: data to normalize BxCxWxH\n dataset: name of the dataset to normalize\n\n Returns:\n normalized data as (x-mean)/255\n\n \"\"\"\n if dataset == 'pascal_voc':\n mean = (104.00698793, 116.66876762, 122.6789143) # bgr\n elif dataset == 'cityscapes':\n mean = (72.39239876, 82.90891754, 73.15835921) # bgr\n else:\n mean = (127.5, 127.5, 127.5 )\n\n mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()\n data_norm = ((data-mean)/255.0)\n return data_norm\n\n\n\ndef grayscale(grayscale, data = None, target = None, probs = None):\n \"\"\"\n\n Args:\n grayscale: boolean whether to apply grayscale augmentation\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n\n Returns:\n data is converted from rgb to grayscale if [grayscale] is True\n target and probs are also returned with no modifications applied\n\n \"\"\"\n if not (data is None):\n if grayscale and data.shape[1]==3:\n seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )\n data = seq(data)\n return data, target, probs\n\ndef colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None):\n \"\"\"\n\n Args:\n colorJitter: boolean whether to apply colorJitter augmentation\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n s: brightness and contrast strength of the color jitter\n\n Returns:\n colorJitter is applied to data if [colorJitter] is True\n target and probs are also returned with no modifications applied\n\n\n \"\"\"\n if not (data is None):\n if colorJitter and data.shape[1]==3:\n seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.))\n data = seq(data/255.)*255. 
# assumes [0,1]\n return data, target, probs\n\ndef gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None):\n \"\"\"\n\n Args:\n blur: boolean whether to apply blur\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n min_sigma: minimum sigma value for the gaussian blur\n max_sigma: maximum sigma value for the gaussian blur\n\n Returns:\n gaussian blur is applied to data if [blur] is True\n target and probs are also returned with no modifications applied\n\n \"\"\"\n if not (data is None):\n if blur and data.shape[1]==3:\n seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma)))\n data = seq(data)\n return data, target, probs\n\ndef flip(flip, data = None, target = None, probs = None):\n \"\"\"\n\n Args:\n flip: boolean whether to apply flip augmentation\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n\n Returns:\n data, target and probs are flipped if the boolean flip is True\n\n \"\"\"\n if flip:\n if not (data is None): data = torch.flip(data,(3,))\n if not (target is None):\n target = torch.flip(target,(2,))\n if not (probs is None):\n probs = torch.flip(probs,(2,))\n return data, target, probs\n\ndef solarize(solarize, data = None, target = None, probs = None):\n \"\"\"\n\n Args:\n solarize: boolean whether to apply solarize augmentation\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n\n Returns:\n data, target, probs, where\n data is solarized if [solarize] is True\n\n \"\"\"\n if not (data is None):\n if solarize and data.shape[1]==3:\n seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1)))\n data = seq(data.cpu()/255.).cuda()*255.\n return data, target, probs\n\n\n\n\ndef mix(mask, data = None, target = None, probs = None):\n \"\"\"\n Applies classMix augmentation:\n https://openaccess.thecvf.com/content/WACV2021/papers/Olsson_ClassMix_Segmentation-Based_Data_Augmentation_for_Semi-Supervised_Learning_WACV_2021_paper.pdf\n Args:\n mask: masks for applying ClassMix. A list of B elements of CxWxH tensors\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n\n Returns:\n data, target and probs augmented with classMix\n\n \"\"\"\n if not (data is None):\n if mask.shape[0] == data.shape[0]:\n data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])])\n\n if not (target is None):\n target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])])\n\n if not (probs is None):\n probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])])\n\n return data, target, probs\n\n\ndef random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None):\n \"\"\"\n\n Args:\n scale: scale ratio. Float\n data: input data to augment BxCxWxH\n target: labels to augment BxWxH\n probs: probability masks to augment BxCxWxH\n ignore_label: integeer value that defines the ignore class in the datasets for the labels\n\n Returns:\n data, target and prob, after applied a scaling operation. 
output resolution is preserve as the same as the input resolution WxH\n \"\"\"\n if scale != 1:\n init_size_w = data.shape[2]\n init_size_h = data.shape[3]\n\n # scale data, labels and probs\n data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True)\n if target is not None:\n target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1)\n if probs is not None:\n probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1)\n\n final_size_w = data.shape[2]\n final_size_h = data.shape[3]\n diff_h = init_size_h - final_size_h\n diff_w = init_size_w - final_size_w\n if scale < 1: # add padding if needed\n if diff_h % 2 == 1:\n pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0)\n else:\n pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0)\n\n data = pad(data)\n if probs is not None:\n probs = pad(probs)\n\n # padding with ignore label to add to labels\n if diff_h % 2 == 1:\n pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label)\n else:\n pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label)\n\n if target is not None:\n target = pad(target)\n\n else: # crop if needed\n w = random.randint(0, data.shape[2] - init_size_w)\n h = random.randint(0, data.shape[3] - init_size_h)\n data = data [:,:,h:h+init_size_h,w:w + init_size_w]\n if probs is not None:\n probs = probs [:,h:h+init_size_h,w:w + init_size_w]\n if target is not None:\n target = target [:,h:h+init_size_h,w:w + init_size_w]\n\n return data, target, probs\n\n\n"
] | [
[
"torch.Tensor",
"torch.flip",
"torch.nn.functional.interpolate",
"torch.nn.ConstantPad2d"
]
] |
krowck/ISDA-NCjDE-HJ | [
"44c33ba12542a88eaa39fe2b72398ffd7b439372"
] | [
"NDBSCANjDE/CF3.py"
] | [
"###############################################################################\n# Version: 1.1\n# Last modified on: 3 April, 2016 \n# Developers: Michael G. Epitropakis\n# email: m_(DOT)_epitropakis_(AT)_lancaster_(DOT)_ac_(DOT)_uk \n###############################################################################\nfrom cfunction import *\nimport numpy as np\n\nclass CF3(CFunction):\n\tdef __init__(self, dim):\n\t\tsuper(CF3, self).__init__(dim, 6)\n\n\t\t# Initialize data for composition\n\t\tself._CFunction__sigma_ = np.array( [1.0, 1.0, 2.0, 2.0, 2.0, 2.0] )\n\t\tself._CFunction__bias_ = np.zeros( self._CFunction__nofunc_ )\n\t\tself._CFunction__weight_ = np.zeros( self._CFunction__nofunc_ )\n\t\tself._CFunction__lambda_ = np.array( [1.0/4.0, 1.0/10.0, 2.0, 1.0, 2.0, 5.0] )\n\t\t\n\t\t# Lower/Upper Bounds\n\t\tself._CFunction__lbound_ = -5.0 * np.ones( dim )\n\t\tself._CFunction__ubound_ = 5.0 * np.ones( dim )\n\n\t\t# Load optima\n\t\to = np.loadtxt('data/optima.dat') \n\t\tif o.shape[1] >= dim:\n\t\t\tself._CFunction__O_ = o[:self._CFunction__nofunc_, :dim]\n\t\telse: # randomly initialize\n\t\t\tself._CFunction__O_ = self._CFunction__lbound_ + (self._CFunction__ubound_ - self._CFunction__lbound_) * np.random.rand( (self._CFunction__nofunc_, dim) )\n\n\t\t# Load M_: Rotation matrices\n\t\tif dim == 2 or dim == 3 or dim == 5 or dim == 10 or dim == 20:\n\t\t\tfname = \"data/CF3_M_D\" + str(dim) + \".dat\"\n\t\t\tself._CFunction__load_rotmat(fname)\n\t\telse:\n\t\t\t# M_ Identity matrices # TODO: Generate dimension independent rotation matrices\n\t\t\tself._CFunction__M_ = [ np.eye(dim) ] * self._CFunction__nofunc_\n\n\t\t# Initialize functions of the composition\n\t\tself._CFunction__function_ = {0:FEF8F2, 1:FEF8F2, 2:FWeierstrass, 3:FWeierstrass, 4:FGrienwank, 5:FGrienwank}\n\n\t\t# Calculate fmaxi\n\t\tself._CFunction__calculate_fmaxi()\n\n\tdef evaluate(self, x):\n\t\treturn self._CFunction__evaluate_inner_(x)\n"
] | [
[
"numpy.ones",
"numpy.eye",
"numpy.zeros",
"numpy.random.rand",
"numpy.array",
"numpy.loadtxt"
]
] |
13rianlucero/CrabAgePrediction | [
"92bc7fbe1040f49e820473e33cc3902a5a7177c7",
"92bc7fbe1040f49e820473e33cc3902a5a7177c7",
"d73a6b7f68d7bab25d134d3f85c6b63a86c206c5"
] | [
"crabageprediction/venv/Lib/site-packages/pandas/tests/extension/base/dim2.py",
"crabageprediction/venv/Lib/site-packages/pandas/tests/frame/methods/test_values.py",
"crabageprediction/venv/Lib/site-packages/numpy/f2py/tests/test_compile_function.py"
] | [
"\"\"\"\nTests for 2D compatibility.\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.missing import is_matching_na\n\nimport pandas as pd\nfrom pandas.core.arrays.integer import INT_STR_TO_DTYPE\nfrom pandas.tests.extension.base.base import BaseExtensionTests\n\n\nclass Dim2CompatTests(BaseExtensionTests):\n def test_transpose(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n shape = arr2d.shape\n assert shape[0] != shape[-1] # otherwise the rest of the test is useless\n\n assert arr2d.T.shape == shape[::-1]\n\n def test_frame_from_2d_array(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n\n df = pd.DataFrame(arr2d)\n expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})\n self.assert_frame_equal(df, expected)\n\n def test_swapaxes(self, data):\n arr2d = data.repeat(2).reshape(-1, 2)\n\n result = arr2d.swapaxes(0, 1)\n expected = arr2d.T\n self.assert_extension_array_equal(result, expected)\n\n def test_delete_2d(self, data):\n arr2d = data.repeat(3).reshape(-1, 3)\n\n # axis = 0\n result = arr2d.delete(1, axis=0)\n expected = data.delete(1).repeat(3).reshape(-1, 3)\n self.assert_extension_array_equal(result, expected)\n\n # axis = 1\n result = arr2d.delete(1, axis=1)\n expected = data.repeat(2).reshape(-1, 2)\n self.assert_extension_array_equal(result, expected)\n\n def test_take_2d(self, data):\n arr2d = data.reshape(-1, 1)\n\n result = arr2d.take([0, 0, -1], axis=0)\n\n expected = data.take([0, 0, -1]).reshape(-1, 1)\n self.assert_extension_array_equal(result, expected)\n\n def test_repr_2d(self, data):\n # this could fail in a corner case where an element contained the name\n res = repr(data.reshape(1, -1))\n assert res.count(f\"<{type(data).__name__}\") == 1\n\n res = repr(data.reshape(-1, 1))\n assert res.count(f\"<{type(data).__name__}\") == 1\n\n def test_reshape(self, data):\n arr2d = data.reshape(-1, 1)\n assert arr2d.shape == (data.size, 1)\n assert len(arr2d) == len(data)\n\n arr2d = data.reshape((-1, 1))\n assert arr2d.shape == (data.size, 1)\n assert len(arr2d) == len(data)\n\n with pytest.raises(ValueError):\n data.reshape((data.size, 2))\n with pytest.raises(ValueError):\n data.reshape(data.size, 2)\n\n def test_getitem_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n result = arr2d[0]\n self.assert_extension_array_equal(result, data)\n\n with pytest.raises(IndexError):\n arr2d[1]\n\n with pytest.raises(IndexError):\n arr2d[-2]\n\n result = arr2d[:]\n self.assert_extension_array_equal(result, arr2d)\n\n result = arr2d[:, :]\n self.assert_extension_array_equal(result, arr2d)\n\n result = arr2d[:, 0]\n expected = data[[0]]\n self.assert_extension_array_equal(result, expected)\n\n # dimension-expanding getitem on 1D\n result = data[:, np.newaxis]\n self.assert_extension_array_equal(result, arr2d.T)\n\n def test_iter_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n objs = list(iter(arr2d))\n assert len(objs) == arr2d.shape[0]\n\n for obj in objs:\n assert isinstance(obj, type(data))\n assert obj.dtype == data.dtype\n assert obj.ndim == 1\n assert len(obj) == arr2d.shape[1]\n\n def test_tolist_2d(self, data):\n arr2d = data.reshape(1, -1)\n\n result = arr2d.tolist()\n expected = [data.tolist()]\n\n assert isinstance(result, list)\n assert all(isinstance(x, list) for x in result)\n\n assert result == expected\n\n def test_concat_2d(self, data):\n left = type(data)._concat_same_type([data, data]).reshape(-1, 2)\n right = left.copy()\n\n # axis=0\n result = left._concat_same_type([left, right], axis=0)\n expected = data._concat_same_type([data] * 
4).reshape(-1, 2)\n self.assert_extension_array_equal(result, expected)\n\n # axis=1\n result = left._concat_same_type([left, right], axis=1)\n assert result.shape == (len(data), 4)\n self.assert_extension_array_equal(result[:, :2], left)\n self.assert_extension_array_equal(result[:, 2:], right)\n\n # axis > 1 -> invalid\n msg = \"axis 2 is out of bounds for array of dimension 2\"\n with pytest.raises(ValueError, match=msg):\n left._concat_same_type([left, right], axis=2)\n\n @pytest.mark.parametrize(\"method\", [\"backfill\", \"pad\"])\n def test_fillna_2d_method(self, data_missing, method):\n arr = data_missing.repeat(2).reshape(2, 2)\n assert arr[0].isna().all()\n assert not arr[1].isna().any()\n\n result = arr.fillna(method=method)\n\n expected = data_missing.fillna(method=method).repeat(2).reshape(2, 2)\n self.assert_extension_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"method\", [\"mean\", \"median\", \"var\", \"std\", \"sum\", \"prod\"])\n def test_reductions_2d_axis_none(self, data, method):\n arr2d = data.reshape(1, -1)\n\n err_expected = None\n err_result = None\n try:\n expected = getattr(data, method)()\n except Exception as err:\n # if the 1D reduction is invalid, the 2D reduction should be as well\n err_expected = err\n try:\n result = getattr(arr2d, method)(axis=None)\n except Exception as err2:\n err_result = err2\n\n else:\n result = getattr(arr2d, method)(axis=None)\n\n if err_result is not None or err_expected is not None:\n assert type(err_result) == type(err_expected)\n return\n\n assert is_matching_na(result, expected) or result == expected\n\n @pytest.mark.parametrize(\"method\", [\"mean\", \"median\", \"var\", \"std\", \"sum\", \"prod\"])\n def test_reductions_2d_axis0(self, data, method):\n arr2d = data.reshape(1, -1)\n\n kwargs = {}\n if method == \"std\":\n # pass ddof=0 so we get all-zero std instead of all-NA std\n kwargs[\"ddof\"] = 0\n\n try:\n result = getattr(arr2d, method)(axis=0, **kwargs)\n except Exception as err:\n try:\n getattr(data, method)()\n except Exception as err2:\n assert type(err) == type(err2)\n return\n else:\n raise AssertionError(\"Both reductions should raise or neither\")\n\n def get_reduction_result_dtype(dtype):\n # windows and 32bit builds will in some cases have int32/uint32\n # where other builds will have int64/uint64.\n if dtype.itemsize == 8:\n return dtype\n elif dtype.kind in \"ib\":\n return INT_STR_TO_DTYPE[np.dtype(int).name]\n else:\n # i.e. 
dtype.kind == \"u\"\n return INT_STR_TO_DTYPE[np.dtype(np.uint).name]\n\n if method in [\"mean\", \"median\", \"sum\", \"prod\"]:\n # std and var are not dtype-preserving\n expected = data\n if method in [\"sum\", \"prod\"] and data.dtype.kind in \"iub\":\n dtype = get_reduction_result_dtype(data.dtype)\n\n expected = data.astype(dtype)\n if data.dtype.kind == \"b\" and method in [\"sum\", \"prod\"]:\n # We get IntegerArray instead of BooleanArray\n pass\n else:\n assert type(expected) == type(data), type(expected)\n assert dtype == expected.dtype\n\n self.assert_extension_array_equal(result, expected)\n elif method == \"std\":\n self.assert_extension_array_equal(result, data - data)\n # punt on method == \"var\"\n\n @pytest.mark.parametrize(\"method\", [\"mean\", \"median\", \"var\", \"std\", \"sum\", \"prod\"])\n def test_reductions_2d_axis1(self, data, method):\n arr2d = data.reshape(1, -1)\n\n try:\n result = getattr(arr2d, method)(axis=1)\n except Exception as err:\n try:\n getattr(data, method)()\n except Exception as err2:\n assert type(err) == type(err2)\n return\n else:\n raise AssertionError(\"Both reductions should raise or neither\")\n\n # not necessarily type/dtype-preserving, so weaker assertions\n assert result.shape == (1,)\n expected_scalar = getattr(data, method)()\n res = result[0]\n assert is_matching_na(res, expected_scalar) or res == expected_scalar\n\n\nclass NDArrayBacked2DTests(Dim2CompatTests):\n # More specific tests for NDArrayBackedExtensionArray subclasses\n\n def test_copy_order(self, data):\n # We should be matching numpy semantics for the \"order\" keyword in 'copy'\n arr2d = data.repeat(2).reshape(-1, 2)\n assert arr2d._ndarray.flags[\"C_CONTIGUOUS\"]\n\n res = arr2d.copy()\n assert res._ndarray.flags[\"C_CONTIGUOUS\"]\n\n res = arr2d[::2, ::2].copy()\n assert res._ndarray.flags[\"C_CONTIGUOUS\"]\n\n res = arr2d.copy(\"F\")\n assert not res._ndarray.flags[\"C_CONTIGUOUS\"]\n assert res._ndarray.flags[\"F_CONTIGUOUS\"]\n\n res = arr2d.copy(\"K\")\n assert res._ndarray.flags[\"C_CONTIGUOUS\"]\n\n res = arr2d.T.copy(\"K\")\n assert not res._ndarray.flags[\"C_CONTIGUOUS\"]\n assert res._ndarray.flags[\"F_CONTIGUOUS\"]\n\n # order not accepted by numpy\n msg = r\"order must be one of 'C', 'F', 'A', or 'K' \\(got 'Q'\\)\"\n with pytest.raises(ValueError, match=msg):\n arr2d.copy(\"Q\")\n\n # neither contiguity\n arr_nc = arr2d[::2]\n assert not arr_nc._ndarray.flags[\"C_CONTIGUOUS\"]\n assert not arr_nc._ndarray.flags[\"F_CONTIGUOUS\"]\n\n assert arr_nc.copy()._ndarray.flags[\"C_CONTIGUOUS\"]\n assert not arr_nc.copy()._ndarray.flags[\"F_CONTIGUOUS\"]\n\n assert arr_nc.copy(\"C\")._ndarray.flags[\"C_CONTIGUOUS\"]\n assert not arr_nc.copy(\"C\")._ndarray.flags[\"F_CONTIGUOUS\"]\n\n assert not arr_nc.copy(\"F\")._ndarray.flags[\"C_CONTIGUOUS\"]\n assert arr_nc.copy(\"F\")._ndarray.flags[\"F_CONTIGUOUS\"]\n\n assert arr_nc.copy(\"K\")._ndarray.flags[\"C_CONTIGUOUS\"]\n assert not arr_nc.copy(\"K\")._ndarray.flags[\"F_CONTIGUOUS\"]\n",
"import numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n NaT,\n Series,\n Timestamp,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDataFrameValues:\n @td.skip_array_manager_invalid_test\n def test_values(self, float_frame):\n float_frame.values[:, 0] = 5.0\n assert (float_frame.values[:, 0] == 5).all()\n\n def test_more_values(self, float_string_frame):\n values = float_string_frame.values\n assert values.shape[1] == len(float_string_frame.columns)\n\n def test_values_mixed_dtypes(self, float_frame, float_string_frame):\n frame = float_frame\n arr = frame.values\n\n frame_cols = frame.columns\n for i, row in enumerate(arr):\n for j, value in enumerate(row):\n col = frame_cols[j]\n if np.isnan(value):\n assert np.isnan(frame[col][i])\n else:\n assert value == frame[col][i]\n\n # mixed type\n arr = float_string_frame[[\"foo\", \"A\"]].values\n assert arr[0, 0] == \"bar\"\n\n df = DataFrame({\"complex\": [1j, 2j, 3j], \"real\": [1, 2, 3]})\n arr = df.values\n assert arr[0, 0] == 1j\n\n def test_values_duplicates(self):\n df = DataFrame(\n [[1, 2, \"a\", \"b\"], [1, 2, \"a\", \"b\"]], columns=[\"one\", \"one\", \"two\", \"two\"]\n )\n\n result = df.values\n expected = np.array([[1, 2, \"a\", \"b\"], [1, 2, \"a\", \"b\"]], dtype=object)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_values_with_duplicate_columns(self):\n df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=[\"x\", \"x\"])\n result = df.values\n expected = np.array([[1, 2.5], [3, 4.5]])\n assert (result == expected).all().all()\n\n @pytest.mark.parametrize(\"constructor\", [date_range, period_range])\n def test_values_casts_datetimelike_to_object(self, constructor):\n series = Series(constructor(\"2000-01-01\", periods=10, freq=\"D\"))\n\n expected = series.astype(\"object\")\n\n df = DataFrame({\"a\": series, \"b\": np.random.randn(len(series))})\n\n result = df.values.squeeze()\n assert (result[:, 0] == expected.values).all()\n\n df = DataFrame({\"a\": series, \"b\": [\"foo\"] * len(series)})\n\n result = df.values.squeeze()\n assert (result[:, 0] == expected.values).all()\n\n def test_frame_values_with_tz(self):\n tz = \"US/Central\"\n df = DataFrame({\"A\": date_range(\"2000\", periods=4, tz=tz)})\n result = df.values\n expected = np.array(\n [\n [Timestamp(\"2000-01-01\", tz=tz)],\n [Timestamp(\"2000-01-02\", tz=tz)],\n [Timestamp(\"2000-01-03\", tz=tz)],\n [Timestamp(\"2000-01-04\", tz=tz)],\n ]\n )\n tm.assert_numpy_array_equal(result, expected)\n\n # two columns, homogeneous\n\n df[\"B\"] = df[\"A\"]\n result = df.values\n expected = np.concatenate([expected, expected], axis=1)\n tm.assert_numpy_array_equal(result, expected)\n\n # three columns, heterogeneous\n est = \"US/Eastern\"\n df[\"C\"] = df[\"A\"].dt.tz_convert(est)\n\n new = np.array(\n [\n [Timestamp(\"2000-01-01T01:00:00\", tz=est)],\n [Timestamp(\"2000-01-02T01:00:00\", tz=est)],\n [Timestamp(\"2000-01-03T01:00:00\", tz=est)],\n [Timestamp(\"2000-01-04T01:00:00\", tz=est)],\n ]\n )\n expected = np.concatenate([expected, new], axis=1)\n result = df.values\n tm.assert_numpy_array_equal(result, expected)\n\n def test_interleave_with_tzaware(self, timezone_frame):\n\n # interleave with object\n result = timezone_frame.assign(D=\"foo\").values\n expected = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", 
tz=\"US/Eastern\"),\n NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n [\"foo\", \"foo\", \"foo\"],\n ],\n dtype=object,\n ).T\n tm.assert_numpy_array_equal(result, expected)\n\n # interleave with only datetime64[ns]\n result = timezone_frame.values\n expected = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", tz=\"US/Eastern\"),\n NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n ],\n dtype=object,\n ).T\n tm.assert_numpy_array_equal(result, expected)\n\n def test_values_interleave_non_unique_cols(self):\n df = DataFrame(\n [[Timestamp(\"20130101\"), 3.5], [Timestamp(\"20130102\"), 4.5]],\n columns=[\"x\", \"x\"],\n index=[1, 2],\n )\n\n df_unique = df.copy()\n df_unique.columns = [\"x\", \"y\"]\n assert df_unique.values.shape == df.values.shape\n tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])\n tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])\n\n def test_values_numeric_cols(self, float_frame):\n float_frame[\"foo\"] = \"bar\"\n\n values = float_frame[[\"A\", \"B\", \"C\", \"D\"]].values\n assert values.dtype == np.float64\n\n def test_values_lcd(self, mixed_float_frame, mixed_int_frame):\n\n # mixed lcd\n values = mixed_float_frame[[\"A\", \"B\", \"C\", \"D\"]].values\n assert values.dtype == np.float64\n\n values = mixed_float_frame[[\"A\", \"B\", \"C\"]].values\n assert values.dtype == np.float32\n\n values = mixed_float_frame[[\"C\"]].values\n assert values.dtype == np.float16\n\n # GH#10364\n # B uint64 forces float because there are other signed int types\n values = mixed_int_frame[[\"A\", \"B\", \"C\", \"D\"]].values\n assert values.dtype == np.float64\n\n values = mixed_int_frame[[\"A\", \"D\"]].values\n assert values.dtype == np.int64\n\n # B uint64 forces float because there are other signed int types\n values = mixed_int_frame[[\"A\", \"B\", \"C\"]].values\n assert values.dtype == np.float64\n\n # as B and C are both unsigned, no forcing to float is needed\n values = mixed_int_frame[[\"B\", \"C\"]].values\n assert values.dtype == np.uint64\n\n values = mixed_int_frame[[\"A\", \"C\"]].values\n assert values.dtype == np.int32\n\n values = mixed_int_frame[[\"C\", \"D\"]].values\n assert values.dtype == np.int64\n\n values = mixed_int_frame[[\"A\"]].values\n assert values.dtype == np.int32\n\n values = mixed_int_frame[[\"C\"]].values\n assert values.dtype == np.uint8\n\n\nclass TestPrivateValues:\n @td.skip_array_manager_invalid_test\n def test_private_values_dt64tz(self):\n dta = date_range(\"2000\", periods=4, tz=\"US/Central\")._data.reshape(-1, 1)\n\n df = DataFrame(dta, columns=[\"A\"])\n tm.assert_equal(df._values, dta)\n\n # we have a view\n assert np.shares_memory(df._values._ndarray, dta._ndarray)\n\n # TimedeltaArray\n tda = dta - dta\n df2 = df - df\n tm.assert_equal(df2._values, tda)\n\n @td.skip_array_manager_invalid_test\n def test_private_values_dt64tz_multicol(self):\n dta = date_range(\"2000\", periods=8, tz=\"US/Central\")._data.reshape(-1, 2)\n\n df = DataFrame(dta, columns=[\"A\", \"B\"])\n tm.assert_equal(df._values, dta)\n\n # we have a view\n assert np.shares_memory(df._values._ndarray, 
dta._ndarray)\n\n # TimedeltaArray\n tda = dta - dta\n df2 = df - df\n tm.assert_equal(df2._values, tda)\n\n def test_private_values_dt64_multiblock(self, using_array_manager, request):\n if using_array_manager:\n mark = pytest.mark.xfail(reason=\"returns ndarray\")\n request.node.add_marker(mark)\n\n dta = date_range(\"2000\", periods=8)._data\n\n df = DataFrame({\"A\": dta[:4]}, copy=False)\n df[\"B\"] = dta[4:]\n\n assert len(df._mgr.arrays) == 2\n\n result = df._values\n expected = dta.reshape(2, 4).T\n tm.assert_equal(result, expected)\n",
"\"\"\"See https://github.com/numpy/numpy/pull/11937.\n\n\"\"\"\nimport sys\nimport os\nimport uuid\nfrom importlib import import_module\nimport pytest\n\nimport numpy.f2py\n\nfrom numpy.testing import assert_equal\nfrom . import util\n\n\ndef setup_module():\n if not util.has_c_compiler():\n pytest.skip(\"Needs C compiler\")\n if not util.has_f77_compiler():\n pytest.skip('Needs FORTRAN 77 compiler')\n\n\n# extra_args can be a list (since gh-11937) or string.\n# also test absence of extra_args\[email protected](\n \"extra_args\", [['--noopt', '--debug'], '--noopt --debug', '']\n )\[email protected]_references(reason=\"Imported module seems never deleted.\")\ndef test_f2py_init_compile(extra_args):\n # flush through the f2py __init__ compile() function code path as a\n # crude test for input handling following migration from\n # exec_command() to subprocess.check_output() in gh-11937\n\n # the Fortran 77 syntax requires 6 spaces before any commands, but\n # more space may be added/\n fsource = \"\"\"\n integer function foo()\n foo = 10 + 5\n return\n end\n \"\"\"\n # use various helper functions in util.py to enable robust build /\n # compile and reimport cycle in test suite\n moddir = util.get_module_dir()\n modname = util.get_temp_module_name()\n\n cwd = os.getcwd()\n target = os.path.join(moddir, str(uuid.uuid4()) + '.f')\n # try running compile() with and without a source_fn provided so\n # that the code path where a temporary file for writing Fortran\n # source is created is also explored\n for source_fn in [target, None]:\n # mimic the path changing behavior used by build_module() in\n # util.py, but don't actually use build_module() because it has\n # its own invocation of subprocess that circumvents the\n # f2py.compile code block under test\n try:\n os.chdir(moddir)\n ret_val = numpy.f2py.compile(\n fsource,\n modulename=modname,\n extra_args=extra_args,\n source_fn=source_fn\n )\n finally:\n os.chdir(cwd)\n\n # check for compile success return value\n assert_equal(ret_val, 0)\n\n # we are not currently able to import the Python-Fortran\n # interface module on Windows / Appveyor, even though we do get\n # successful compilation on that platform with Python 3.x\n if sys.platform != 'win32':\n # check for sensible result of Fortran function; that means\n # we can import the module name in Python and retrieve the\n # result of the sum operation\n return_check = import_module(modname)\n calc_result = return_check.foo()\n assert_equal(calc_result, 15)\n # Removal from sys.modules, is not as such necessary. 
Even with\n # removal, the module (dict) stays alive.\n del sys.modules[modname]\n\n\ndef test_f2py_init_compile_failure():\n # verify an appropriate integer status value returned by\n # f2py.compile() when invalid Fortran is provided\n ret_val = numpy.f2py.compile(b\"invalid\")\n assert_equal(ret_val, 1)\n\n\ndef test_f2py_init_compile_bad_cmd():\n # verify that usage of invalid command in f2py.compile() returns\n # status value of 127 for historic consistency with exec_command()\n # error handling\n\n # patch the sys Python exe path temporarily to induce an OSError\n # downstream NOTE: how bad of an idea is this patching?\n try:\n temp = sys.executable\n sys.executable = 'does not exist'\n\n # the OSError should take precedence over invalid Fortran\n ret_val = numpy.f2py.compile(b\"invalid\")\n assert_equal(ret_val, 127)\n finally:\n sys.executable = temp\n\n\[email protected]('fsource',\n ['program test_f2py\\nend program test_f2py',\n b'program test_f2py\\nend program test_f2py',])\ndef test_compile_from_strings(tmpdir, fsource):\n # Make sure we can compile str and bytes gh-12796\n cwd = os.getcwd()\n try:\n os.chdir(str(tmpdir))\n ret_val = numpy.f2py.compile(\n fsource,\n modulename='test_compile_from_strings',\n extension='.f90')\n assert_equal(ret_val, 0)\n finally:\n os.chdir(cwd)\n"
] | [
[
"numpy.dtype",
"pandas.DataFrame",
"pandas._libs.missing.is_matching_na"
],
[
"pandas._testing.assert_numpy_array_equal",
"pandas.date_range",
"numpy.shares_memory",
"pandas.DataFrame",
"pandas._testing.assert_equal",
"numpy.isnan",
"numpy.array",
"numpy.concatenate",
"pandas.Timestamp"
],
[
"numpy.testing.assert_equal"
]
] |
ssh0/growing-string | [
"2e43916e91157dfb4253775149b35ec9d81ef14d"
] | [
"triangular_lattice/diecutting/result_n2.py"
] | [
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n# written by Shotaro Fujimoto\n# 2016-12-07\n\nimport matplotlib.pyplot as plt\n# from mpl_toolkits.mplot3d.axes3d import Axes3D\nimport matplotlib.cm as cm\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import gamma\nimport set_data_path\n\n\ndef load_data(_path):\n data = np.load(_path)\n beta = data['beta']\n try:\n size_dist_ave = data['size_dist_ave']\n return load_data_averaged(_path)\n except KeyError:\n pass\n\n num_of_strings = data['num_of_strings']\n frames = data['frames']\n Ls = data['Ls'].astype(np.float)\n # Ls = (3 * Ls * (Ls + 1) + 1)\n size_dist = data['size_dist']\n\n N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings\n n0 = N0[1:]\n S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings\n n1 = (S[1:] - n0) * 2.\n\n N = []\n for l in size_dist:\n dot = np.dot(np.arange(len(l)), np.array(l).T)\n N.append(dot)\n # N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])\n N_all = 3. * Ls * (Ls + 1.) + 1\n N = np.array(N, dtype=np.float) / num_of_strings\n N_minus = N_all - N\n\n N_minus_rate = N_minus / N_all\n\n n_minus = N_minus[1:] - N_minus[:-1]\n\n n1_ave = n1 / np.sum(n1)\n\n n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)\n\n return {\n 'beta': beta,\n 'num_of_strings': num_of_strings,\n 'frames': frames,\n 'Ls': Ls,\n 'N_minus': N_minus,\n 'N_minus_rate': N_minus_rate,\n 'S': S,\n 'n0': n0,\n 'n1': n1,\n 'n2': n2,\n 'n_minus': n_minus,\n 'n1_ave': n1_ave,\n }\n\ndef load_data_averaged(_path):\n data = np.load(_path)\n beta = data['beta']\n num_of_strings = data['num_of_strings']\n frames = data['frames']\n Ls = data['Ls'].astype(np.float)\n # Ls = (3 * Ls * (Ls + 1) + 1)\n # size_dist = data['size_dist']\n size_dist_ave = data['size_dist_ave']\n\n N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)\n n0 = N0[1:]\n S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)\n n1 = (S[1:] - n0) * 2.\n\n N = []\n for l in size_dist_ave:\n dot = np.dot(np.arange(len(l)), np.array(l).T)\n N.append(dot)\n # N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])\n N_all = 3. * Ls * (Ls + 1.) + 1\n N = np.array(N, dtype=np.float)\n N_minus = N_all - N\n\n N_minus_rate = N_minus / N_all\n\n n_minus = N_minus[1:] - N_minus[:-1]\n\n n1_ave = n1 / np.sum(n1)\n\n n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)\n\n return {\n 'beta': beta,\n 'num_of_strings': num_of_strings,\n 'frames': frames,\n 'Ls': Ls,\n 'N_minus': N_minus,\n 'N_minus_rate': N_minus_rate,\n 'S': S,\n 'n0': n0,\n 'n1': n1,\n 'n2': n2,\n 'n_minus': n_minus,\n 'n1_ave': n1_ave,\n }\n\ndef result_n2(path):\n fig, ax = plt.subplots()\n for i, result_data_path in enumerate(path):\n globals().update(load_data(result_data_path))\n ax.plot(Ls[1:], n2, '.', label=r'$\\beta = %2.2f$' % beta,\n color=cm.viridis(float(i) / len(path)))\n ax.legend(loc='best')\n ax.set_title('Averaged number of the sites on the cutting edges which \\\n is connected to two neighbors.' + \n ' (sample: {})'.format(num_of_strings))\n ax.set_xlabel(r'Cutting size $L$')\n ax.set_ylabel(r'$n_{2}$')\n plt.show()\n\n\nif __name__ == '__main__':\n result_n2(set_data_path.data_path)\n"
] | [
[
"numpy.load",
"numpy.sum",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.array"
]
] |
bilgetutak/pyroms | [
"cd0fe39075825f97a7caf64e2c4c5a19f23302fd",
"cd0fe39075825f97a7caf64e2c4c5a19f23302fd"
] | [
"examples/Yellow_Sea/make_YELLOW_grd_v1.py",
"pyroms_toolbox/pyroms_toolbox/Grid_HYCOM/make_remap_grid_file.py"
] | [
"import os\nfrom pyroms import _iso\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap, shiftgrid\nfrom scipy.interpolate import griddata\nimport matplotlib.colors as colors\nfrom scipy.signal import medfilt2d\nimport netCDF4\n\nimport pyroms\nfrom bathy_smoother import *\n\n# Grid dimension\nLm = 140\nMm = 120\n\nlon0=117.5 ; lat0 = 41.\nlon1=117.5 ; lat1 = 34.5\nlon2 = 127. ; lat2 = 34.5\nlon3 = 127. ; lat3 = 41.\nmap = Basemap(projection='lcc', lat_0=35., lat_1=30., lat_2=40, lon_0 =123, \\\n width=2000000, height=2000000, resolution='i')\n\nlonp = np.array([lon0, lon1, lon2, lon3])\nlatp = np.array([lat0, lat1, lat2, lat3])\nbeta = np.array([1, 1, 1, 1])\n\n#generate the new grid\n# Do this if you aren't going to move the grid corners interactively.\nhgrd = pyroms.grid.Gridgen(lonp, latp, beta, (Mm+3, Lm+3), proj=map)\n# Do this if you are going to use the Boundary Interactor\n#map.drawcoastlines()\n#xp, yp = map(lonp, latp)\n#bry = pyroms.hgrid.BoundaryInteractor(xp, yp, beta, shp=(Mm+3,Lm+3), proj=map)\n#hgrd=bry.grd\n\nlonv, latv = list(map(hgrd.x_vert, hgrd.y_vert, inverse=True))\nhgrd = pyroms.grid.CGrid_geo(lonv, latv, map)\n\n# generate the mask\n#for verts in map.coastsegs:\n# hgrd.mask_polygon(verts)\n# alternate version from johan.navarro.padron\n\nfor xx,yy in map.coastpolygons:\n xa = np.array(xx, np.float32)\n ya = np.array(yy,np.float32)\n vv = np.zeros((xa.shape[0],2))\n vv[:, 0] = xa\n vv[:, 1] = ya\n hgrd.mask_polygon(vv,mask_value=0)\n\n# Edit the land mask interactively.\n#pyroms.grid.edit_mask_mesh(hgrd, proj=map)\n#edit_mask_mesh_ij is a faster version using imshow... but no map projection.\ncoast = pyroms.utility.get_coast_from_map(map)\npyroms.grid.edit_mask_mesh_ij(hgrd, coast=coast)\n\n\n#### Use the following to interpolate from etopo2 bathymetry.\n# generate the bathy\n# read in topo data (on a regular lat/lon grid)\n# this topo come with basemap so you should have it on your laptop.\n# just update datadir with the appropriate path\n# you can get this data from matplolib svn with\n# svn co https://matplotlib.svn.sourceforge.net/svnroot/matplotlib/trunk/htdocs/screenshots/data/\"\n\ndatadir = 'data/'\ntopo = np.loadtxt(os.path.join(datadir, 'etopo20data.gz'))\nlons = np.loadtxt(os.path.join(datadir, 'etopo20lons.gz'))\nlats = np.loadtxt(os.path.join(datadir, 'etopo20lats.gz'))\n\n# depth positive\ntopo = -topo\n\n# fix minimum depth\nhmin = 5\ntopo = np.where(topo < hmin, hmin, topo)\n\n# interpolate new bathymetry\nlon, lat = np.meshgrid(lons, lats)\nh = griddata((lon.flat,lat.flat),topo.flat,(hgrd.lon_rho,hgrd.lat_rho), method='linear')\n\n# insure that depth is always deeper than hmin\nh = np.where(h < hmin, hmin, h)\n\n# set depth to hmin where masked\nidx = np.where(hgrd.mask_rho == 0)\nh[idx] = hmin\n\n# save raw bathymetry\nhraw = h.copy()\n\n# check bathymetry roughness\nRoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)\nprint('Max Roughness value is: ', RoughMat.max())\n\n# smooth the raw bathy using the direct iterative method from Martinho and Batteen (2006)\nrx0_max = 0.35\nh = bathy_smoothing.smoothing_Positive_rx0(hgrd.mask_rho, h, rx0_max)\n\n# check bathymetry roughness again\nRoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)\nprint('Max Roughness value is: ', RoughMat.max())\n\n# vertical coordinate\ntheta_b = 2\ntheta_s = 7.0\nTcline = 50\nN = 30\nvgrd = pyroms.vgrid.s_coordinate_4(h, theta_b, theta_s, Tcline, N, hraw=hraw)\n\n# ROMS grid\ngrd_name = 'YELLOW'\ngrd = pyroms.grid.ROMS_Grid(grd_name, hgrd, vgrd)\n\n# 
write grid to netcdf file\npyroms.grid.write_ROMS_grid(grd, filename='YELLOW_grd_v1.nc')\n",
"import numpy as np\nfrom mpl_toolkits.basemap import pyproj\nfrom datetime import datetime\ntry:\n import netCDF4 as netCDF\nexcept:\n import netCDF3 as netCDF\n\n\ndef make_remap_grid_file(grd):\n\n #create remap file\n remap_filename = 'remap_grid_' + grd.name + '_t.nc'\n nc = netCDF.Dataset(remap_filename, 'w', format='NETCDF3_64BIT')\n nc.Description = 'remap grid file for HYCOM'\n nc.Author = 'pyroms_toolbox.Grid_HYCOM.make_remap_grid_file'\n nc.Created = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n nc.title = grd.name\n\n lon_corner = grd.lon_vert\n lat_corner = grd.lat_vert\n grid_center_lon = grd.lon_t.flatten()\n grid_center_lat = grd.lat_t.flatten()\n Mp, Lp = grd.lon_t.shape\n grid_imask = grd.mask_t[0,:].flatten()\n\n grid_size = Lp * Mp\n\n grid_corner_lon = np.zeros((grid_size, 4))\n grid_corner_lat = np.zeros((grid_size, 4))\n k = 0\n for j in range(Mp):\n for i in range(Lp):\n grid_corner_lon[k,0] = lon_corner[j,i]\n grid_corner_lat[k,0] = lat_corner[j,i]\n grid_corner_lon[k,1] = lon_corner[j,i+1]\n grid_corner_lat[k,1] = lat_corner[j,i+1]\n grid_corner_lon[k,2] = lon_corner[j+1,i+1]\n grid_corner_lat[k,2] = lat_corner[j+1,i+1]\n grid_corner_lon[k,3] = lon_corner[j+1,i]\n grid_corner_lat[k,3] = lat_corner[j+1,i]\n k = k + 1\n\n\n #Write netcdf file\n nc.createDimension('grid_size', grid_size)\n nc.createDimension('grid_corners', 4)\n nc.createDimension('grid_rank', 2)\n\n nc.createVariable('grid_dims', 'i4', ('grid_rank'))\n nc.variables['grid_dims'].long_name = 'grid size along x and y axis'\n nc.variables['grid_dims'].units = 'None'\n nc.variables['grid_dims'][:] = [(Lp, Mp)]\n\n nc.createVariable('grid_center_lon', 'f8', ('grid_size'))\n nc.variables['grid_center_lon'].long_name = 'longitude of cell center'\n nc.variables['grid_center_lon'].units = 'degrees'\n nc.variables['grid_center_lon'][:] = grid_center_lon\n\n nc.createVariable('grid_center_lat', 'f8', ('grid_size'))\n nc.variables['grid_center_lat'].long_name = 'latitude of cell center'\n nc.variables['grid_center_lat'].units = 'degrees'\n nc.variables['grid_center_lat'][:] = grid_center_lat\n\n nc.createVariable('grid_imask', 'i4', ('grid_size'))\n nc.variables['grid_imask'].long_name = 'mask'\n nc.variables['grid_imask'].units = 'None'\n nc.variables['grid_imask'][:] = grid_imask\n\n nc.createVariable('grid_corner_lon', 'f8', ('grid_size', 'grid_corners'))\n nc.variables['grid_corner_lon'].long_name = 'longitude of cell corner'\n nc.variables['grid_corner_lon'].units = 'degrees'\n nc.variables['grid_corner_lon'][:] = grid_corner_lon\n\n nc.createVariable('grid_corner_lat', 'f8', ('grid_size', 'grid_corners'))\n nc.variables['grid_corner_lat'].long_name = 'latitude of cell corner'\n nc.variables['grid_corner_lat'].units = 'degrees'\n nc.variables['grid_corner_lat'][:] = grid_corner_lat\n\n nc.close()\n\n"
] | [
[
"scipy.interpolate.griddata",
"numpy.zeros",
"numpy.array",
"numpy.where",
"numpy.meshgrid"
],
[
"numpy.zeros"
]
] |
alinaselega/gap_statistic | [
"2b94c46b676eef839f7709441a89bdc5796b2d31"
] | [
"tests/test_optimalK.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nimport pytest\n\nimport numpy as np\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import KMeans, MeanShift\n\nfrom gap_statistic import OptimalK\n\n\ndef test_bad_init_config():\n \"\"\"\n Cannot define own clustering function and try to use Rust backend\n \"\"\"\n with pytest.raises(ValueError):\n OptimalK(parallel_backend=\"rust\", clusterer=lambda x, k: print(\"just testing\"))\n\n\[email protected](\"ClusterModel\", [KMeans, MeanShift])\ndef test_alternative_clusting_method(ClusterModel):\n \"\"\"\n Test that users can supply alternative clustering method as dep injection\n \"\"\"\n\n def clusterer(X: np.ndarray, k: int, another_test_arg):\n \"\"\"\n Function to wrap a sklearn model as a clusterer for OptimalK\n First two arguments are always the data matrix, and k, and can supply\n \"\"\"\n m = ClusterModel()\n m.fit(X)\n assert another_test_arg == \"test\"\n return m.cluster_centers_, m.predict(X)\n\n optimalk = OptimalK(\n n_jobs=-1,\n parallel_backend=\"joblib\",\n clusterer=clusterer,\n clusterer_kwargs={\"another_test_arg\": \"test\"},\n )\n X, y = make_blobs(n_samples=50, n_features=2, centers=3)\n n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))\n assert isinstance(n_clusters, int)\n\n\[email protected](\n \"parallel_backend, n_jobs, n_clusters\",\n [\n pytest.param(\n \"joblib\", 1, 3, id=\"parallel_backend='joblib', n_jobs=1, n_clusters=3\"\n ),\n pytest.param(None, 1, 3, id=\"parallel_backend=None, n_jobs=1, n_clusters=3\"),\n # TODO: Add back this test param in rust side extension\n # pytest.param(\n # \"rust\", 1, 3, id=\"parallel_backend='rust', n_jobs=1, n_clusters=3\"\n # ),\n ],\n)\ndef test_optimalk(parallel_backend, n_jobs, n_clusters):\n \"\"\"\n Test core functionality of OptimalK using all backends.\n \"\"\"\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs)\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))\n\n assert np.allclose(\n suggested_clusters, n_clusters, 2\n ), \"Correct clusters is {}, OptimalK suggested {}\".format(\n n_clusters, suggested_clusters\n )\n\n\[email protected](\n \"TEST_RUST_EXT\" not in os.environ, reason=\"Rust extension not built.\"\n)\ndef test_optimalk_rust_ext():\n \"\"\"\n Test core functionality of OptimalK using all backends.\n \"\"\"\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=\"rust\", n_jobs=1)\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))\n\n assert np.allclose(\n suggested_clusters, 3, 2\n ), \"Correct clusters is {}, OptimalK suggested {}\".format(3, suggested_clusters)\n\n\ndef test_optimalk_cluster_array_vs_data_sizes_error():\n \"\"\"\n Test ValueError when cluster_array is larger than dataset.\n \"\"\"\n import numpy as np\n from gap_statistic import OptimalK\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=None, n_jobs=-1)\n\n # Create data\n X, y = make_blobs(n_samples=5, n_features=2, centers=3)\n\n with pytest.raises(ValueError) as excinfo:\n optimalK(X, cluster_array=np.arange(1, 10))\n assert \"The number of suggested clusters to try\" in str(excinfo.value)\n\n\ndef test_optimalk_cluster_array_values_error():\n \"\"\"\n Test ValueError when cluster_array contains values less than 1\n \"\"\"\n from gap_statistic import 
OptimalK\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=None, n_jobs=-1)\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n with pytest.raises(ValueError) as excinfo:\n optimalK(X, cluster_array=[0, -1, 1, 2, 3])\n assert \"cluster_array contains values less than 1\" in str(excinfo.value)\n\n\ndef test_optimalk_cluster_array_empty_error():\n \"\"\"\n Test ValueError when cluster_array is empty.\n \"\"\"\n from gap_statistic import OptimalK\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=None, n_jobs=-1)\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n with pytest.raises(ValueError) as excinfo:\n optimalK(X, cluster_array=[])\n assert \"The supplied cluster_array has no values.\" in str(excinfo.value)\n\n\ndef test_dunders():\n \"\"\"\n Test that implemented dunder methods don't return errors\n \"\"\"\n from gap_statistic import OptimalK\n\n optimalK = OptimalK()\n optimalK.__str__()\n optimalK.__repr__()\n optimalK._repr_html_()\n"
] | [
[
"numpy.arange",
"numpy.allclose",
"sklearn.datasets.make_blobs"
]
] |
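The test file above drives `OptimalK`, which picks the number of clusters via the gap statistic. Below is a minimal sketch of that statistic for a single `k`, assuming a plain scikit-learn `KMeans` as the clusterer and uniform reference sets (Tibshirani et al., 2001); `OptimalK`'s actual implementation differs in details such as reference sampling and the final selection rule.

```python
# Sketch only: the gap statistic for one k, not the gap_statistic package API.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs


def gap_for_k(X: np.ndarray, k: int, n_refs: int = 3) -> float:
    """Gap(k) = mean(log W_ref) - log W_data; larger means a better k."""

    def dispersion(data: np.ndarray) -> float:
        # Within-cluster sum of squared distances to the nearest centroid.
        return KMeans(n_clusters=k, n_init=10).fit(data).inertia_

    mins, maxs = X.min(axis=0), X.max(axis=0)
    ref_logs = [
        np.log(dispersion(np.random.uniform(mins, maxs, size=X.shape)))
        for _ in range(n_refs)
    ]
    return float(np.mean(ref_logs) - np.log(dispersion(X)))


X, _ = make_blobs(n_samples=200, n_features=2, centers=3)
print(max(range(1, 6), key=lambda k: gap_for_k(X, k)))  # typically 3
```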
noushadkhan01/my_methods | [
"fc467d5c34b9b5dd105e32cc5aad218d3f6408a8"
] | [
"my_methods/my_cap_curve.py"
] | [
"def my_cap_curve(model, X, y, figsize = (10, 5),legend_font_size = 10,loc = 'best',\n linewidth = 2,label_font_size = 10, poly_features = False, extra_name = None):\n import matplotlib.pyplot as plt\n import numpy as np\n import my_global_variables\n from sklearn.metrics import roc_curve, auc\n class_name = model.__class__.__name__\n if poly_features:\n class_name = class_name + '_poly'\n if extra_name:\n class_name += '_' + extra_name\n total = len(y)\n class_1_count = np.sum(y)\n class_0_count = total - class_1_count\n probs = model.predict_proba(X)\n probs = probs[:, 1]\n model_y = [y for _, y in sorted(zip(probs, y), reverse = True)]\n y_values = np.append([0], np.cumsum(model_y))\n x_values = np.arange(0, total + 1)\n # Area under Random Model\n a = auc([0, total], [0, class_1_count])\n\n # Area between Perfect and Random Model\n aP = auc([0, class_1_count, total], [0, class_1_count, class_1_count]) - a\n\n # Area between Trained and Random Model\n aR = auc(x_values, y_values) - a\n plt.figure(figsize = (figsize))\n plt.plot([0, total], [0, class_1_count], c = 'r', linestyle = '--', label = 'Random Model')\n plt.plot([0, class_1_count, total], [0, class_1_count, class_1_count], c = 'grey', linewidth = linewidth, label = 'Perfect Model')\n plt.plot(x_values, y_values, c = 'b', label = f'{class_name} Classifier Accuracy Rate = {aR/aP}', linewidth = linewidth)\n plt.xlabel('Total observations', fontsize = label_font_size)\n plt.ylabel('Class 1 observations', fontsize = label_font_size)\n plt.title('Cumulative Accuracy Profile', fontsize = label_font_size)\n plt.legend(loc = loc, fontsize = legend_font_size)\n plt.show()\n my_global_variables.model_cap_scores[class_name] = aR/aP\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"sklearn.metrics.auc",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
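`my_cap_curve` above reports the CAP accuracy ratio `aR/aP`: the area between the trained and random models divided by the area between the perfect and random models. A worked toy example of that ratio with made-up labels and scores (not the original helper):

```python
import numpy as np
from sklearn.metrics import auc

y = np.array([1, 0, 1, 1, 0, 0, 0, 1])  # 4 positives out of 8
scores = np.array([0.9, 0.2, 0.8, 0.7, 0.4, 0.3, 0.1, 0.6])
gains = np.append([0], np.cumsum(y[np.argsort(scores)[::-1]]))  # best first

total, pos = len(y), int(y.sum())
a_random = auc([0, total], [0, pos])
a_perfect = auc([0, pos, total], [0, pos, pos]) - a_random
a_model = auc(np.arange(total + 1), gains) - a_random
print(a_model / a_perfect)  # 1.0 here: the scores rank every positive first
```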
akurniawan/jina-hub | [
"d89bc5e8f527f1212c3228a15775e222983c0087"
] | [
"encoders/audio/Wav2VecSpeechEncoder/__init__.py"
] | [
"__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport os\nfrom typing import Optional\n\nimport numpy as np\n\nfrom jina.executors.decorators import batching, as_ndarray\nfrom jina.executors.encoders import BaseAudioEncoder\nfrom jina.executors.encoders.frameworks import BaseTorchEncoder\nfrom jina.excepts import PretrainedModelFileDoesNotExist\nfrom jina.helper import cached_property\n\n\nclass Wav2VecSpeechEncoder(BaseTorchEncoder, BaseAudioEncoder):\n \"\"\"\n Use a pre-trained model (`wav2vec`) to encode audio signal.\n\n :class:`Wav2VecSpeechEncoder` is a speech encoder based on `wav2vec`,\n an unsupervised pre-trained model for speech recognition presented and implemented\n by Facebook: https://github.com/pytorch/fairseq/tree/master/examples/wav2vec\n It uses a pre-trained model to encode an audio signal from\n a `Batch x Signal Length` ndarray into a `Batch x Concatenated Features` ndarray,\n and produces a representation for each time step at a rate of 100 Hz.\n\n :param model_path: the path of the pre-trained model.\n The pre-trained model can be downloaded at\n https://github.com/pytorch/fairseq/tree/master/examples/wav2vec/README.md#wav2vec\n :param input_sample_rate: input sampling rate in Hz (22050 by default)\n \"\"\"\n\n def __init__(self,\n model_path: Optional[str] = '/tmp/wav2vec_large.pt',\n input_sample_rate: int = 22050,\n *args,\n **kwargs):\n \"\"\"Set Constructor\"\"\"\n super().__init__(*args, **kwargs)\n self.model_path = model_path\n self.input_sample_rate = input_sample_rate\n\n def post_init(self):\n super().post_init()\n if self.model_path and os.path.exists(self.model_path):\n import torch\n from fairseq.models.wav2vec import Wav2VecModel\n cp = torch.load(self.model_path, map_location=torch.device('cpu'))\n self.model = Wav2VecModel.build_model(cp['args'], task=None)\n self.model.load_state_dict(cp['model'])\n self.model.eval()\n self.to_device(self.model)\n self._tensor_func = torch.tensor\n else:\n raise PretrainedModelFileDoesNotExist(f'model at {self.model_path} does not exist')\n\n @batching\n @as_ndarray\n def encode(self, data: np.ndarray, *args, **kwargs) -> np.ndarray:\n \"\"\"\n Resample input audio signal to 16kHz.\n\n Segments the resampled signal of each Doc into `wav2vec` frames,\n encodes the frames and concatenates Doc frame embeddings into a\n single Doc embedding.\n\n :param data: A`Batch x Signal Length` ndarray, where\n `Signal Length` is a number of samples\n :return: A `Batch x Concatenated Features` ndarray,\n where `Concatenated Features` is a 512-dimensional feature\n vector times the number of the wav2vec frames.\n \"\"\"\n assert data.shape[1] >= 465, 'the signal must have at least 465 samples'\n from librosa import resample\n embeds = []\n with self.session():\n for chunk_data in data:\n resampled_signal = resample(chunk_data, self.input_sample_rate, 16000)\n signal_tensor = self.array2tensor(resampled_signal.reshape(1, -1))\n features = self.model.feature_extractor(signal_tensor)\n embed_tensor = self.model.feature_aggregator(features)[0]\n chunk_embed = self.tensor2array(embed_tensor).T.flatten()\n embeds.append(chunk_embed)\n return embeds\n\n def array2tensor(self, array):\n tensor = self._tensor_func(array)\n return tensor.cuda() if self.on_gpu else tensor\n\n def tensor2array(self, tensor):\n return tensor.cuda().numpy() if self.on_gpu else tensor.numpy()\n\n @cached_property\n def session(self):\n return self.get_session()\n\n def get_session(self):\n from 
torch import no_grad\n return no_grad"
] | [
[
"torch.device"
]
] |
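The `encode` method above first resamples each signal to the 16 kHz rate `wav2vec` expects. That step in isolation, assuming a modern librosa (>= 0.10, where `resample` takes keyword-only sample rates; the encoder above uses the older positional form):

```python
import numpy as np
from librosa import resample

signal = np.random.randn(22050).astype(np.float32)  # 1 second at 22.05 kHz
resampled = resample(signal, orig_sr=22050, target_sr=16000)
print(resampled.shape)  # (16000,) -- one second at the new rate
```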
vigneshyaadav27/Grid-world | [
"a5c4cab46cdafc6458526593ae31ac19a152001d"
] | [
"grid_world.py"
] | [
"#######################################################################\r\n# Copyright (C) #\r\n# 2016-2018 Shangtong Zhang([email protected]) #\r\n# 2016 Kenta Shimada([email protected]) #\r\n# Permission given to modify the code as long as you keep this #\r\n# declaration at the top #\r\n#######################################################################\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom matplotlib.table import Table\r\n\r\nmatplotlib.use('Agg')\r\n\r\nWORLD_SIZE = 5\r\nA_POS = [0, 1]\r\nA_PRIME_POS = [4, 1]\r\nB_POS = [0, 3]\r\nB_PRIME_POS = [2, 3]\r\nDISCOUNT = 0.9\r\n\r\n# left, up, right, down\r\nACTIONS = [np.array([0, -1]),\r\n np.array([-1, 0]),\r\n np.array([0, 1]),\r\n np.array([1, 0])]\r\nACTIONS_FIGS=[ '←', '↑', '→', '↓']\r\n\r\n\r\nACTION_PROB = 0.25\r\n\r\n\r\ndef step(state, action):\r\n if state == A_POS:\r\n return A_PRIME_POS, 10\r\n if state == B_POS:\r\n return B_PRIME_POS, 5\r\n\r\n next_state = (np.array(state) + action).tolist()\r\n x, y = next_state\r\n if x < 0 or x >= WORLD_SIZE or y < 0 or y >= WORLD_SIZE:\r\n reward = -1.0\r\n next_state = state\r\n else:\r\n reward = 0\r\n return next_state, reward\r\n\r\n\r\ndef draw_image(image):\r\n fig, ax = plt.subplots()\r\n ax.set_axis_off()\r\n tb = Table(ax, bbox=[0, 0, 1, 1])\r\n\r\n nrows, ncols = image.shape\r\n width, height = 1.0 / ncols, 1.0 / nrows\r\n\r\n # Add cells\r\n for (i, j), val in np.ndenumerate(image):\r\n\r\n # add state labels\r\n if [i, j] == A_POS:\r\n val = str(val) + \" (A)\"\r\n if [i, j] == A_PRIME_POS:\r\n val = str(val) + \" (A')\"\r\n if [i, j] == B_POS:\r\n val = str(val) + \" (B)\"\r\n if [i, j] == B_PRIME_POS:\r\n val = str(val) + \" (B')\"\r\n \r\n tb.add_cell(i, j, width, height, text=val,\r\n loc='center', facecolor='white')\r\n \r\n\r\n # Row and column labels...\r\n for i in range(len(image)):\r\n tb.add_cell(i, -1, width, height, text=i+1, loc='right',\r\n edgecolor='none', facecolor='none')\r\n tb.add_cell(-1, i, width, height/2, text=i+1, loc='center',\r\n edgecolor='none', facecolor='none')\r\n\r\n ax.add_table(tb)\r\n\r\ndef draw_policy(optimal_values):\r\n fig, ax = plt.subplots()\r\n ax.set_axis_off()\r\n tb = Table(ax, bbox=[0, 0, 1, 1])\r\n\r\n nrows, ncols = optimal_values.shape\r\n width, height = 1.0 / ncols, 1.0 / nrows\r\n\r\n # Add cells\r\n for (i, j), val in np.ndenumerate(optimal_values):\r\n next_vals=[]\r\n for action in ACTIONS:\r\n next_state, _ = step([i, j], action)\r\n next_vals.append(optimal_values[next_state[0],next_state[1]])\r\n\r\n best_actions=np.where(next_vals == np.max(next_vals))[0]\r\n val=''\r\n for ba in best_actions:\r\n val+=ACTIONS_FIGS[ba]\r\n \r\n # add state labels\r\n if [i, j] == A_POS:\r\n val = str(val) + \" (A)\"\r\n if [i, j] == A_PRIME_POS:\r\n val = str(val) + \" (A')\"\r\n if [i, j] == B_POS:\r\n val = str(val) + \" (B)\"\r\n if [i, j] == B_PRIME_POS:\r\n val = str(val) + \" (B')\"\r\n \r\n tb.add_cell(i, j, width, height, text=val,\r\n loc='center', facecolor='white')\r\n\r\n # Row and column labels...\r\n for i in range(len(optimal_values)):\r\n tb.add_cell(i, -1, width, height, text=i+1, loc='right',\r\n edgecolor='none', facecolor='none')\r\n tb.add_cell(-1, i, width, height/2, text=i+1, loc='center',\r\n edgecolor='none', facecolor='none')\r\n\r\n ax.add_table(tb)\r\n\r\n\r\ndef figure_3_2():\r\n value = np.zeros((WORLD_SIZE, WORLD_SIZE))\r\n while True:\r\n # keep iteration until convergence\r\n new_value = np.zeros_like(value)\r\n for i in range(WORLD_SIZE):\r\n 
for j in range(WORLD_SIZE):\r\n for action in ACTIONS:\r\n (next_i, next_j), reward = step([i, j], action)\r\n # bellman equation\r\n new_value[i, j] += ACTION_PROB * (reward + DISCOUNT * value[next_i, next_j])\r\n if np.sum(np.abs(value - new_value)) < 1e-4:\r\n draw_image(np.round(new_value, decimals=2))\r\n plt.savefig('../images/figure_3_2.png')\r\n plt.close()\r\n break\r\n value = new_value\r\n\r\ndef figure_3_2_linear_system():\r\n '''\r\n Here we solve the linear system of equations to find the exact solution.\r\n We do this by filling the coefficients for each of the states with their respective right side constant.\r\n '''\r\n A = -1 * np.eye(WORLD_SIZE * WORLD_SIZE)\r\n b = np.zeros(WORLD_SIZE * WORLD_SIZE)\r\n for i in range(WORLD_SIZE):\r\n for j in range(WORLD_SIZE):\r\n s = [i, j] # current state\r\n index_s = np.ravel_multi_index(s, (WORLD_SIZE, WORLD_SIZE))\r\n for a in ACTIONS:\r\n s_, r = step(s, a)\r\n index_s_ = np.ravel_multi_index(s_, (WORLD_SIZE, WORLD_SIZE))\r\n\r\n A[index_s, index_s_] += ACTION_PROB * DISCOUNT\r\n b[index_s] -= ACTION_PROB * r\r\n\r\n x = np.linalg.solve(A, b)\r\n draw_image(np.round(x.reshape(WORLD_SIZE, WORLD_SIZE), decimals=2))\r\n plt.savefig('../images/figure_3_2_linear_system.png')\r\n plt.close()\r\n\r\ndef figure_3_5():\r\n value = np.zeros((WORLD_SIZE, WORLD_SIZE))\r\n while True:\r\n # keep iteration until convergence\r\n new_value = np.zeros_like(value)\r\n for i in range(WORLD_SIZE):\r\n for j in range(WORLD_SIZE):\r\n values = []\r\n for action in ACTIONS:\r\n (next_i, next_j), reward = step([i, j], action)\r\n # value iteration\r\n values.append(reward + DISCOUNT * value[next_i, next_j])\r\n new_value[i, j] = np.max(values)\r\n if np.sum(np.abs(new_value - value)) < 1e-4:\r\n draw_image(np.round(new_value, decimals=2))\r\n plt.savefig('../images/figure_3_5.png')\r\n plt.close()\r\n draw_policy(new_value)\r\n plt.savefig('../images/figure_3_5_policy.png')\r\n plt.close()\r\n break\r\n value = new_value\r\n\r\n\r\nif __name__ == '__main__':\r\n figure_3_2_linear_system()\r\n figure_3_2()\r\n figure_3_5()\r\n"
] | [
[
"numpy.zeros_like",
"matplotlib.table.Table",
"numpy.linalg.solve",
"numpy.eye",
"numpy.zeros",
"numpy.ravel_multi_index",
"numpy.round",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.ndenumerate",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.use",
"numpy.array"
]
] |
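`figure_3_2_linear_system` above evaluates the random policy exactly by writing the Bellman equations as a linear system. The same idea on a toy two-state chain, solved directly as v = (I - gamma P)^-1 r and checked against the fixed-point iteration the other figures use; P and r here are hypothetical:

```python
import numpy as np

gamma = 0.9
P = np.array([[0.5, 0.5],   # hypothetical policy transition matrix
              [0.2, 0.8]])
r = np.array([1.0, 0.0])    # hypothetical expected reward per state

# Exact evaluation: solve (I - gamma * P) v = r.
v_direct = np.linalg.solve(np.eye(2) - gamma * P, r)

# Fixed-point iteration, as in figure_3_2's convergence loop.
v_iter = np.zeros(2)
for _ in range(1000):
    v_iter = r + gamma * P @ v_iter

print(np.allclose(v_direct, v_iter))  # True
```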
seiyab/chainer | [
"39fffb9597a6e9646307fba27ad3233c65d38632"
] | [
"chainer/training/extensions/variable_statistics_plot.py"
] | [
"from __future__ import division\nimport os\nimport warnings\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import cuda\nfrom chainer.training import extension\nfrom chainer.training import trigger as trigger_module\nfrom chainer.utils import argument\n\n\n_available = None\n\n\ndef _try_import_matplotlib():\n global matplotlib, _available\n global _plot_color, _plot_color_trans, _plot_common_kwargs\n try:\n import matplotlib\n _available = True\n except ImportError:\n _available = False\n\n if _available:\n if hasattr(matplotlib.colors, 'to_rgba'):\n _to_rgba = matplotlib.colors.to_rgba\n else:\n # For matplotlib 1.x\n _to_rgba = matplotlib.colors.ColorConverter().to_rgba\n _plot_color = _to_rgba('#1f77b4') # C0 color\n _plot_color_trans = _plot_color[:3] + (0.2,) # apply alpha\n _plot_common_kwargs = {\n 'alpha': 0.2, 'linewidth': 0, 'color': _plot_color_trans}\n\n\ndef _check_available():\n if _available is None:\n _try_import_matplotlib()\n\n if not _available:\n warnings.warn('matplotlib is not installed on your environment, '\n 'so nothing will be plotted at this time. '\n 'Please install matplotlib to plot figures.\\n\\n'\n ' $ pip install matplotlib\\n')\n\n\ndef _unpack_variables(x, memo=None):\n if memo is None:\n memo = ()\n if isinstance(x, chainer.Variable):\n memo += (x,)\n elif isinstance(x, chainer.Link):\n memo += tuple(x.params(include_uninit=True))\n elif isinstance(x, (list, tuple)):\n for xi in x:\n memo += _unpack_variables(xi)\n return memo\n\n\nclass Reservoir(object):\n\n \"\"\"Reservoir sample with a fixed sized buffer.\"\"\"\n\n def __init__(self, size, data_shape, dtype=numpy.float32):\n self.size = size\n self.data = numpy.zeros((size,) + data_shape, dtype=dtype)\n self.idxs = numpy.zeros((size,), dtype=numpy.int32)\n self.counter = 0\n\n def add(self, x, idx=None):\n if self.counter < self.size:\n self.data[self.counter] = x\n self.idxs[self.counter] = idx or self.counter\n elif self.counter >= self.size and \\\n numpy.random.random() < self.size / float(self.counter + 1):\n i = numpy.random.randint(self.size)\n self.data[i] = x\n self.idxs[i] = idx or self.counter\n self.counter += 1\n\n def get_data(self):\n idxs = self.idxs[:min(self.counter, self.size)]\n sorted_args = numpy.argsort(idxs)\n return idxs[sorted_args], self.data[sorted_args]\n\n\nclass Statistician(object):\n\n \"\"\"Helper to compute basic NumPy-like statistics.\"\"\"\n\n def __init__(self, collect_mean, collect_std, percentile_sigmas):\n self.collect_mean = collect_mean\n self.collect_std = collect_std\n self.percentile_sigmas = percentile_sigmas\n\n def __call__(self, x, axis=0, dtype=None, xp=None):\n if axis is None:\n axis = tuple(range(x.ndim))\n elif not isinstance(axis, (tuple, list)):\n axis = axis,\n\n return self.collect(x, axis)\n\n def collect(self, x, axis):\n out = dict()\n\n if self.collect_mean:\n out['mean'] = x.mean(axis=axis)\n\n if self.collect_std:\n out['std'] = x.std(axis=axis)\n\n if self.percentile_sigmas:\n xp = cuda.get_array_module(x)\n p = xp.percentile(x, self.percentile_sigmas, axis=axis)\n out['percentile'] = p\n\n return out\n\n\nclass VariableStatisticsPlot(extension.Extension):\n\n \"\"\"__init__(\\\n targets, max_sample_size=1000, report_data=True,\\\n report_grad=True, plot_mean=True, plot_std=True,\\\n percentile_sigmas=(0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87,\\\n 100), trigger=(1, 'epoch'), filename='statistics.png',\\\n figsize=None, marker=None, grid=True)\n\n Trainer extension to plot 
statistics for :class:`Variable`\\\\s.\n\n This extension collects statistics for a single :class:`Variable`, a list\n of :class:`Variable`\\\\s or similarly a single or a list of\n :class:`Link`\\\\s containing one or more :class:`Variable`\\\\s. In case\n multiple :class:`Variable`\\\\s are found, the means are computed. The\n collected statistics are plotted and saved as an image in the directory\n specified by the :class:`Trainer`.\n\n Statistics include mean, standard deviation and percentiles.\n\n This extension uses reservoir sampling to preserve memory, using a fixed\n size running sample. This means that collected items in the sample are\n discarded uniformly at random when the number of items becomes larger\n than the maximum sample size, but each item is expected to occur in the\n sample with equal probability.\n\n Args:\n targets (:class:`Variable`, :class:`Link` or list of either):\n Parameters for which statistics are collected.\n max_sample_size (int):\n Maximum number of running samples.\n report_data (bool):\n If ``True``, data (e.g. weights) statistics are plotted. If\n ``False``, they are neither computed nor plotted.\n report_grad (bool):\n If ``True``, gradient statistics are plotted. If ``False``, they\n are neither computed nor plotted.\n plot_mean (bool):\n If ``True``, means are plotted. If ``False``, they are\n neither computed nor plotted.\n plot_std (bool):\n If ``True``, standard deviations are plotted. If ``False``, they\n are neither computed nor plotted.\n percentile_sigmas (float or tuple of floats):\n Percentiles to plot in the range :math:`[0, 100]`.\n trigger:\n Trigger that decides when to save the plots as an image. This is\n distinct from the trigger of this extension itself. If it is a\n tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it\n is passed to :class:`IntervalTrigger`.\n filename (str):\n Name of the output image file under the output directory.\n For historical reasons ``file_name`` is also accepted as an alias\n of this argument.\n figsize (tuple of int):\n Matplotlib ``figsize`` argument that specifies the size of the\n output image.\n marker (str):\n Matplotlib ``marker`` argument that specifies the marker style of\n the plots.\n grid (bool):\n Matplotlib ``grid`` argument that specifies whether grids are\n rendered in the plots or not.\n \"\"\"\n\n def __init__(self, targets, max_sample_size=1000,\n report_data=True, report_grad=True,\n plot_mean=True, plot_std=True,\n percentile_sigmas=(\n 0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100),\n trigger=(1, 'epoch'), filename=None,\n figsize=None, marker=None, grid=True, **kwargs):\n\n file_name, = argument.parse_kwargs(\n kwargs, ('file_name', 'statistics.png')\n )\n if filename is None:\n filename = file_name\n del file_name # avoid accidental use\n\n self._vars = _unpack_variables(targets)\n if not self._vars:\n raise ValueError(\n 'Need at least one variable for which to collect statistics.'\n '\\nActual: 0 <= 0')\n\n if not any((plot_mean, plot_std, bool(percentile_sigmas))):\n raise ValueError('Nothing to plot')\n\n self._keys = []\n if report_data:\n self._keys.append('data')\n if report_grad:\n self._keys.append('grad')\n\n self._report_data = report_data\n self._report_grad = report_grad\n\n self._statistician = Statistician(\n collect_mean=plot_mean, collect_std=plot_std,\n percentile_sigmas=percentile_sigmas)\n\n self._plot_mean = plot_mean\n self._plot_std = plot_std\n self._plot_percentile = bool(percentile_sigmas)\n\n self._trigger = 
trigger_module.get_trigger(trigger)\n self._filename = filename\n self._figsize = figsize\n self._marker = marker\n self._grid = grid\n\n if not self._plot_percentile:\n n_percentile = 0\n else:\n if not isinstance(percentile_sigmas, (list, tuple)):\n n_percentile = 1 # scalar, single percentile\n else:\n n_percentile = len(percentile_sigmas)\n self._data_shape = (\n len(self._keys), int(plot_mean) + int(plot_std) + n_percentile)\n self._samples = Reservoir(max_sample_size, data_shape=self._data_shape)\n\n @staticmethod\n def available():\n _check_available()\n return _available\n\n def __call__(self, trainer):\n if self.available():\n # Dynamically import pyplot to call matplotlib.use()\n # after importing chainer.training.extensions\n import matplotlib.pyplot as plt\n else:\n return\n\n xp = backend.get_array_module(self._vars[0].data)\n stats = xp.zeros(self._data_shape, dtype=xp.float32)\n for i, k in enumerate(self._keys):\n xs = []\n for var in self._vars:\n x = getattr(var, k, None)\n if x is not None:\n xs.append(x.ravel())\n if xs:\n stat_dict = self._statistician(\n xp.concatenate(xs, axis=0), axis=0, xp=xp)\n stat_list = []\n if self._plot_mean:\n stat_list.append(xp.atleast_1d(stat_dict['mean']))\n if self._plot_std:\n stat_list.append(xp.atleast_1d(stat_dict['std']))\n if self._plot_percentile:\n stat_list.append(xp.atleast_1d(stat_dict['percentile']))\n stats[i] = xp.concatenate(stat_list, axis=0)\n\n if xp == cuda.cupy:\n stats = cuda.to_cpu(stats)\n self._samples.add(stats, idx=trainer.updater.iteration)\n\n if self._trigger(trainer):\n file_path = os.path.join(trainer.out, self._filename)\n self.save_plot_using_module(file_path, plt)\n\n def save_plot_using_module(self, file_path, plt):\n nrows = int(self._plot_mean or self._plot_std) \\\n + int(self._plot_percentile)\n ncols = len(self._keys)\n\n fig, axes = plt.subplots(\n nrows, ncols, figsize=self._figsize, sharex=True)\n\n if not isinstance(axes, numpy.ndarray): # single subplot\n axes = numpy.asarray([axes])\n if nrows == 1:\n axes = axes[None, :]\n elif ncols == 1:\n axes = axes[:, None]\n assert axes.ndim == 2\n\n idxs, data = self._samples.get_data()\n\n # Offset to access percentile data from `data`\n offset = int(self._plot_mean) + int(self._plot_std)\n n_percentile = data.shape[-1] - offset\n n_percentile_mid_floor = n_percentile // 2\n n_percentile_odd = n_percentile % 2 == 1\n\n for col in six.moves.range(ncols):\n row = 0\n ax = axes[row, col]\n ax.set_title(self._keys[col]) # `data` or `grad`\n\n if self._plot_mean or self._plot_std:\n if self._plot_mean and self._plot_std:\n ax.errorbar(\n idxs, data[:, col, 0], data[:, col, 1],\n color=_plot_color, ecolor=_plot_color_trans,\n label='mean, std', marker=self._marker)\n else:\n if self._plot_mean:\n label = 'mean'\n elif self._plot_std:\n label = 'std'\n ax.plot(\n idxs, data[:, col, 0], color=_plot_color, label=label,\n marker=self._marker)\n row += 1\n\n if self._plot_percentile:\n ax = axes[row, col]\n for i in six.moves.range(n_percentile_mid_floor + 1):\n if n_percentile_odd and i == n_percentile_mid_floor:\n # Enters at most once per sub-plot, in case there is\n # only a single percentile to plot or when this\n # percentile is the mid percentile and the number of\n # percentiles are odd\n ax.plot(\n idxs, data[:, col, offset + i], color=_plot_color,\n label='percentile', marker=self._marker)\n else:\n if i == n_percentile_mid_floor:\n # Last percentiles and the number of all\n # percentiles are even\n label = 'percentile'\n else:\n label = 
'_nolegend_'\n ax.fill_between(\n idxs,\n data[:, col, offset + i],\n data[:, col, -i - 1],\n label=label,\n **_plot_common_kwargs)\n ax.set_xlabel('iteration')\n\n for ax in axes.ravel():\n ax.legend()\n if self._grid:\n ax.grid()\n ax.set_axisbelow(True)\n\n fig.savefig(file_path)\n plt.close()\n"
] | [
[
"matplotlib.colors.ColorConverter",
"numpy.zeros",
"numpy.argsort",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"numpy.random.random",
"matplotlib.pyplot.close",
"numpy.random.randint"
]
] |
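The `Reservoir` class above keeps a fixed-size, uniformly random sample of the statistics stream, accepting a new item with probability `size / (counter + 1)` once the buffer is full. The same technique as a standalone sketch (classic Algorithm R):

```python
import random

def reservoir_sample(stream, k):
    """Keep k items; each item seen so far survives with equal probability."""
    sample = []
    for i, x in enumerate(stream):
        if i < k:
            sample.append(x)             # fill the buffer first
        else:
            j = random.randrange(i + 1)  # uniform over [0, i]
            if j < k:                    # accept with probability k/(i+1)
                sample[j] = x            # evict a uniformly chosen survivor
    return sample

print(reservoir_sample(range(10_000), 5))
```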
Gikiman/executors | [
"98658b4136859164390cfccbde8cf0f7cf843593"
] | [
"jinahub/encoders/audio/VGGISHAudioEncoder/vggish_audio_encoder.py"
] | [
"__copyright__ = \"Copyright (c) 2021 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport os\nfrom pathlib import Path\nfrom typing import Any, Optional, List, Iterable\n\nfrom jina import Executor, requests, DocumentArray\nfrom jina.logging.logger import JinaLogger\nimport requests as _requests\nimport tensorflow as tf\n\ntf.compat.v1.disable_eager_execution()\n\nfrom .vggish.vggish_postprocess import *\nfrom .vggish.vggish_slim import *\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\nclass VggishAudioEncoder(Executor):\n \"\"\"\n Encode audio data with Vggish embeddings\n\n :param model_path: path of the models directory\n :param default_traversal_paths: fallback batch size in case there is not batch size sent in the request\n \"\"\"\n\n def __init__(self,\n model_path: str = Path(cur_dir) / 'models',\n default_traversal_paths: Optional[Iterable[str]] = None,\n *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.default_traversal_paths = default_traversal_paths or ['r']\n self.logger = JinaLogger(self.__class__.__name__)\n\n self.model_path = Path(model_path)\n self.vgg_model_path = self.model_path / 'vggish_model.ckpt'\n self.pca_model_path = self.model_path / 'vggish_pca_params.ckpt'\n self.model_path.mkdir(exist_ok=True) # Create the model directory if it does not exist yet\n\n if not self.vgg_model_path.exists():\n self.logger.info('VGGish model cannot be found from the given model path, downloading a new one...')\n try:\n r = _requests.get('https://storage.googleapis.com/audioset/vggish_model.ckpt')\n r.raise_for_status()\n except _requests.exceptions.HTTPError:\n self.logger.error('received HTTP error response, cannot download vggish model')\n raise\n except _requests.exceptions.RequestException:\n self.logger.error('Connection error, cannot download vggish model')\n raise\n\n with open(self.vgg_model_path, 'wb') as f:\n f.write(r.content)\n\n if not self.pca_model_path.exists():\n self.logger.info('PCA model cannot be found from the given model path, downloading a new one...')\n try:\n r = _requests.get('https://storage.googleapis.com/audioset/vggish_pca_params.npz')\n r.raise_for_status()\n except _requests.exceptions.HTTPError:\n self.logger.error('received HTTP error response, cannot download pca model')\n raise\n except _requests.exceptions.RequestException:\n self.logger.error('Connection error, cannot download pca model')\n raise\n\n with open(self.pca_model_path, 'wb') as f:\n f.write(r.content)\n\n\n self.sess = tf.compat.v1.Session()\n define_vggish_slim()\n load_vggish_slim_checkpoint(self.sess, str(self.vgg_model_path))\n self.feature_tensor = self.sess.graph.get_tensor_by_name(\n INPUT_TENSOR_NAME)\n self.embedding_tensor = self.sess.graph.get_tensor_by_name(\n OUTPUT_TENSOR_NAME)\n self.post_processor = Postprocessor(str(self.pca_model_path))\n\n @requests\n def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):\n \"\"\"\n Compute embeddings and store them in the `docs` array.\n\n :param docs: documents sent to the encoder. The docs must have `text`.\n By default, the input `text` must be a `list` of `str`.\n :param parameters: dictionary to define the `traversal_paths` and the `batch_size`. 
For example,\n `parameters={'traversal_paths': ['r'], 'batch_size': 10}`.\n :param kwargs: Additional keyword arguments.\n :return:\n \"\"\"\n if docs:\n cleaned_document_array = self._get_input_data(docs, parameters)\n self._create_embeddings(cleaned_document_array)\n\n def _get_input_data(self, docs: DocumentArray, parameters: dict):\n \"\"\"Create a filtered set of Documents to iterate over.\"\"\"\n\n traversal_paths = parameters.get('traversal_paths', self.default_traversal_paths)\n\n # traverse through all documents which have to be processed\n flat_docs = docs.traverse_flat(traversal_paths)\n\n # filter out documents without audio blobs\n filtered_docs = DocumentArray([doc for doc in flat_docs if doc.blob is not None])\n\n return filtered_docs\n\n def _create_embeddings(self, filtered_docs: Iterable):\n \"\"\"Update the documents with the embeddings generated by VGGISH\"\"\"\n\n for d in filtered_docs:\n # Vggish broadcasts across different length audios, not batches\n [embedding] = self.sess.run([self.embedding_tensor], feed_dict={self.feature_tensor: d.blob})\n result = self.post_processor.postprocess(embedding)\n d.embedding = np.mean((np.float32(result) - 128.) / 128., axis=0)\n\n def close(self):\n self.sess.close()\n"
] | [
[
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.Session"
]
] |
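In `_create_embeddings` above, the post-processed VGGish output is one uint8 vector per time frame; the final line maps it back to [-1, 1) and mean-pools over frames. That step in isolation, with random stand-in data:

```python
import numpy as np

# Ten hypothetical post-processed VGGish frames, 128-dim each, quantized to uint8.
frames = np.random.randint(0, 256, size=(10, 128), dtype=np.uint8)
embedding = np.mean((np.float32(frames) - 128.0) / 128.0, axis=0)
print(embedding.shape)  # (128,) -- one embedding per Document
```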
iguarna/uwb-ieee | [
"782813b8a6fc9effeb076c47cd5d497b6e62b330"
] | [
"uwb_channel.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef gen_channel(parameters, fc=5E9, fs=2E9, dynamic_range=30):\n\n # Calculate samples/nanosec ratio\n nanosec_to_samples = int(1E-9 * fs)\n\n #####################################\n # Unpack parameters and convert units\n\n cluster_rate = parameters['cluster_rate'] / nanosec_to_samples\n inter_cluster_rate_1 = parameters['inter_cluster_rate_1'] / nanosec_to_samples\n inter_cluster_rate_2 = parameters['inter_cluster_rate_2'] / nanosec_to_samples\n beta = parameters['beta']\n cluster_decay = parameters['cluster_decay'] * nanosec_to_samples\n inter_cluster_decay = parameters['inter_cluster_decay'] * nanosec_to_samples\n mean_m = parameters['mean_m']\n std_m = parameters['std_m']\n std_cluster_shadowing = parameters['std_cluster_shadowing']\n kf = parameters['kf']\n\n #########################\n # Obtain impulse response\n\n if inter_cluster_decay > cluster_decay:\n raise ValueError(\"Inter cluster decay cannot be larger than cluster decay.\")\n\n max_t = int(dynamic_range * cluster_decay * np.log(10) / 10)\n\n h = np.zeros(max_t, dtype=complex)\n\n t = 0\n\n while t < max_t:\n tau = 0\n\n max_tau = int((max_t - t) * inter_cluster_decay / cluster_decay)\n\n cluster_power = np.exp(-t / cluster_decay) * np.random.lognormal(mean=0, sigma=std_cluster_shadowing)\n\n while tau < max_tau:\n\n # Mean power for this ray\n mean_power = cluster_power * np.exp(-tau / inter_cluster_decay)\n\n # Nakagami m-factor is log normally distributed\n m = np.random.lognormal(mean_m, std_m)\n\n # Compute amplitude as Nakagami distributed\n a = np.sqrt(np.random.gamma(shape=m, scale=mean_power / m))\n\n # Compute phase as uniformly distributed\n phi = np.random.uniform(0, 2 * np.pi)\n\n h[t + tau] = np.array([a * np.exp(-1j * phi)])[0]\n\n if np.random.uniform(0, 1) < beta:\n inter_cluster_rate = inter_cluster_rate_1\n else:\n inter_cluster_rate = inter_cluster_rate_2\n\n tau += round(np.random.exponential(1 / inter_cluster_rate))\n\n t += round(np.random.exponential(1 / cluster_rate))\n\n ##########################\n # Add frequency dependency\n\n # Zero padding before FFT to avoid artifacts\n h = np.append(h, np.zeros(h.size, dtype=complex))\n\n H = np.fft.fft(h, norm='ortho')\n\n # Get frequency array in the same order as produced by the FFT\n freq = np.linspace(fc - fs / 2, fc + fs / 2, num=h.size)\n freq = np.append(freq[freq.size // 2:], freq[:freq.size // 2])\n\n # Calculate frequency dependency and apply\n Gf = np.power(freq, -2 * kf)\n H = np.multiply(Gf, H)\n\n # Inverse FFT\n h = np.fft.ifft(H, norm='ortho')\n\n # Remove padding\n h = h[:h.size // 2]\n\n ###############\n # Normalization\n\n h = normalize(h)\n\n return h\n\n\ndef normalize(s):\n return s / np.sqrt(energy(s))\n\n\ndef energy(s):\n return np.sum(np.square(np.abs(s)))\n\n\nif __name__ == '__main__':\n parameters_cm1 = {\n 'cluster_rate': 0.047,\n 'inter_cluster_rate_1': 1.54,\n 'inter_cluster_rate_2': 0.15,\n 'beta': 0.095,\n 'cluster_decay': 22.61,\n 'inter_cluster_decay': 12.53,\n 'mean_m': 0.67,\n 'std_m': 0.28,\n 'std_cluster_shadowing': 2.75,\n 'kf': 1.12,\n 'kd': 1.79,\n 'std_path_shadowing': 2.22\n\n }\n\n h = gen_channel(parameters=parameters_cm1,\n fc=(10.6E9 + 3.1E9) / 2,\n fs=6E9,\n dynamic_range=30)\n\n plt.plot(np.abs(h))\n plt.show()\n"
] | [
[
"numpy.random.uniform",
"numpy.fft.fft",
"numpy.multiply",
"numpy.append",
"numpy.zeros",
"numpy.abs",
"numpy.exp",
"numpy.fft.ifft",
"numpy.power",
"matplotlib.pyplot.show",
"numpy.random.lognormal",
"numpy.random.exponential",
"numpy.log",
"numpy.random.gamma",
"numpy.linspace"
]
] |
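`gen_channel` above ends by normalizing the impulse response to unit energy, with energy defined as sum(|s|^2). A quick self-contained check of that invariant on a random complex response:

```python
import numpy as np

def energy(s):
    return np.sum(np.square(np.abs(s)))

def normalize(s):
    return s / np.sqrt(energy(s))

s = np.random.randn(64) + 1j * np.random.randn(64)  # random complex response
print(np.isclose(energy(normalize(s)), 1.0))        # True
```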
cbarros7/holbertonschool-machine_learning | [
"1edb4c253441f6319b86c9c590d1e7dd3fc32bf4",
"1edb4c253441f6319b86c9c590d1e7dd3fc32bf4"
] | [
"supervised_learning/0x03-optimization/12-learning_rate_decay.py",
"unsupervised_learning/0x03-hyperparameter_tuning/4-bayes_opt.py"
] | [
"#!/usr/bin/env python3\n\"\"\"Learning Rate Decay Upgraded\"\"\"\nimport tensorflow as tf\n\n\ndef learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n \"\"\"learning_rate_decay: creates a learning rate decay operation in\n tensorflow using inverse time decay:\n\n Args:\n alpha: is the original learning rate\n decay_rate: is the weight used to determine the rate at\n which alpha will decay\n global_step: is the number of passes of gradient descent\n that have elapsed\n decay_step: is the number of passes of gradient descent\n that should occur before alpha is decayed further\n\n Returns: the learning rate decay operation\n \"\"\"\n return tf.train.inverse_time_decay(\n learning_rate=alpha, global_step=global_step, decay_steps=decay_step,\n decay_rate=decay_rate, staircase=True, name=None\n )\n",
"#!/usr/bin/env python3\n\"\"\"\n4-bayes_opt.py\n\"\"\"\nimport numpy as np\nfrom scipy.stats import norm\nGP = __import__('2-gp').GaussianProcess\n\n\nclass BayesianOptimization:\n \"\"\"\n Class that instantiates a Bayesian optimization\n on a noiseless 1D Gaussian process\n \"\"\"\n\n def __init__(self, f, X_init, Y_init, bounds,\n ac_samples, l=1, sigma_f=1, xsi=0.01, minimize=True):\n \"\"\"define and initialize variables and methods\"\"\"\n\n self.f = f\n self.gp = GP(X_init, Y_init, l, sigma_f)\n self.X_s = np.linspace(bounds[0], bounds[1],\n num=ac_samples)[..., np.newaxis]\n self.xsi = xsi\n self.minimize = minimize\n\n def acquisition(self):\n \"\"\"function that calculates the next best sample location\"\"\"\n\n # Compute mu and sigma in a call to predict() on gp\n mu, sigma = self.gp.predict(self.X_s)\n # print(\"mu:\", mu, mu.shape)\n # print(\"sigma:\", sigma, sigma.shape)\n\n # Note: sigma of shape (s,)\n Z = np.zeros(sigma.shape)\n if self.minimize is True:\n f_plus = np.min(self.gp.Y)\n Z_NUM = f_plus - mu - self.xsi\n else:\n f_plus = np.max(self.gp.Y)\n Z_NUM = mu - f_plus - self.xsi\n\n for i in range(sigma.shape[0]):\n if sigma[i] > 0:\n Z[i] = Z_NUM[i] / sigma[i]\n else:\n Z[i] = 0\n\n # Compute the Expected Improvement (EI)\n EI = np.zeros(sigma.shape)\n for i in range(sigma.shape[0]):\n if sigma[i] > 0:\n EI[i] = Z_NUM[i] * norm.cdf(Z[i]) + sigma[i] * norm.pdf(Z[i])\n else:\n EI[i] = 0\n X_next = self.X_s[np.argmax(EI)]\n\n # print(\"EI:\", EI)\n # print(\"self.X_s:\", self.X_s)\n return X_next, EI\n"
] | [
[
"tensorflow.train.inverse_time_decay"
],
[
"numpy.zeros",
"scipy.stats.norm.pdf",
"numpy.argmax",
"numpy.max",
"numpy.min",
"numpy.linspace",
"scipy.stats.norm.cdf"
]
] |
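`acquisition` above computes Expected Improvement element by element with explicit loops. The same quantity in vectorized form (minimization case), assuming `mu` and `sigma` come from the GP's `predict`; this is a sketch, not the class's API:

```python
import numpy as np
from scipy.stats import norm

def expected_improvement(mu, sigma, f_best, xsi=0.01):
    improve = f_best - mu - xsi  # minimization: improvement over best so far
    with np.errstate(divide="ignore", invalid="ignore"):
        Z = np.where(sigma > 0, improve / sigma, 0.0)
    ei = improve * norm.cdf(Z) + sigma * norm.pdf(Z)
    return np.where(sigma > 0, ei, 0.0)  # zero EI where the GP is certain

mu = np.array([0.5, 0.2, 0.9])
sigma = np.array([0.1, 0.0, 0.3])
print(expected_improvement(mu, sigma, f_best=0.4))
```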
raphaottoni/youtube-hatespeech | [
"4467d065c2bddd86feec673ad74670c4c975f017"
] | [
"analyses/weat.py"
] | [
"from gensim import corpora, models, similarities\nimport json\nimport numpy\nimport random\nimport csv\nimport itertools\nimport gensim, copy\nfrom collections import defaultdict\nimport glob, os\n\ntarget_words = {}\nattribute_words= {}\nweats_name = []\n\n# 1 WEAT Immigrants\ntarget_words[\"1_a\"] = [\"immigrant\", \"migrant\"]\ntarget_words[\"1_b\"] = [\"citizen\", \"native\"]\nattribute_words[\"1_a\"] = [\"bad\", \"burden\", \"pirate\", \"plague\", \"taker\", \"thief\"]\nattribute_words[\"1_b\"] = [\"good\", \"honest\", \"maker\", \"rightful\"]\nweats_name.append(\"Immigrants\")\n\n# 2 WEAT Muslims\ntarget_words[\"2_a\"] = [\"islamism\", \"muhammed\", \"muslim\", \"quran\"]\ntarget_words[\"2_b\"] = [\"bible\", \"christian\", \"christianity\", \"jesus\"]\nattribute_words[\"2_a\"] = [\"assassin\", \"attack\", \"bomb\", \"death\", \"murder\", \"radical\", \"terrorist\"]\nattribute_words[\"2_b\"] = [\"compassionate\", \"gentle\", \"humane\", \"kind\", \"tolerant\"]\nweats_name.append(\"Muslims\")\n\n# 3 WEAT LGBT people\ntarget_words[\"3_a\"] = [\"bisexual\", \"gay\", \"homosexual\", \"lesbian\"]\ntarget_words[\"3_b\"] = [\"het\", \"hetero\", \"heterosexual\", \"straight\"]\nattribute_words[\"3_a\"] = [\"immoral\", \"outrageous\", \"promiscuous\", \"revolting\", \"sinner\"]\nattribute_words[\"3_b\"] = [\"moral\", \"natural\", \"normal\"]\nweats_name.append(\"LGBT People\")\n\n# 4 WEAT math arts male_female\ntarget_words[\"4_a\"] = [\"math\",\"algebra\",\"geometry\",\"calculus\",\"equations\",\"computation\",\"numbers\" ]\ntarget_words[\"4_b\"] = [\"poetry\",\"art\",\"dance\",\"literature\",\"novel\",\"symphony\",\"drama\",\"sculpture\" ]\nattribute_words[\"4_a\"] = [\"male\",\"man\",\"boy\",\"brother\",\"he\",\"him\",\"his\", \"son\"]\nattribute_words[\"4_b\"] = [\"female\",\"woman\",\"girl\",\"sister\",\"she\",\"her\",\"hers\", \"daughter\"]\nweats_name.append(\"Math Arts\")\n\n# 5 WEAT Judaism and Christianity\n# Religious Judaism Christianity\ntarget_words[\"5_a\"] = [\"church\",\"bible\",\"christian\",\"christianity\"]\ntarget_words[\"5_b\"] = [\"synagogue\",\"torah\",\"jew\",\"judaism\"]\nattribute_words[\"5_a\"] = [\"terrific\",\"delight\",\"celebrate\",\"glad\",\"beautiful\",\"appealing\", \"pleasure\", \"enjoy\" ]\nattribute_words[\"5_b\"] = [\"hate\",\"angry\",\"detest\",\"despise\",\"dirty\",\"humiliate\",\"pain\", \"horrible\"]\nweats_name.append(\"Jewish\")\n\n# 6 WEAT African American 1\ntarget_words[\"6_a\"] = [\"brad\", \"brendan\", \"geoffrey\", \"greg\", \"brett\", \"jay\",\n \"matthew\", \"neil\", \"todd\", \"allison\", \"anne\", \"carrie\",\n \"emily\", \"jill\", \"laurie\", \"kristen\", \"meredith\", \"sarah\"]\n\ntarget_words[\"6_b\"] = [\"darnell\", \"hakim\", \"jermaine\", \"kareem\", \"jamal\",\n \"leroy\", \"rasheed\", \"tremayne\", \"tyrone\", \"aisha\", \n \"ebony\", \"keisha\", \"kenya\", \"latonya\", \"lakisha\", \n \"latoya\", \"tamika\", \"tanisha\"]\nattribute_words[\"6_a\"] = [ \"joy\", \"love\", \"peace\", \"wonderful\", \"pleasure\", \"friend\", \"laughter\", \"happy\"]\nattribute_words[\"6_b\"] = [\"agony\", \"terrible\", \"horrible\", \"nasty\", \"evil\", \"war\", \"awful\",\"failure\"]\nweats_name.append(\"African - 1\")\n\n# 7 WEAT African American 2\ntarget_words[\"7_a\"] = [\"brad\", \"brendan\", \"geoffrey\", \"greg\", \"brett\", \"jay\",\n \"matthew\", \"neil\", \"todd\", \"allison\", \"anne\", \"carrie\",\n \"emily\", \"jill\", \"laurie\", \"kristen\", \"meredith\", \"sarah\"]\n\ntarget_words[\"7_b\"] = [\"darnell\", \"hakim\", \"jermaine\", 
\"kareem\", \"jamal\",\n \"leroy\", \"rasheed\", \"tremayne\", \"tyrone\", \"aisha\", \n \"ebony\", \"keisha\", \"kenya\", \"latonya\", \"lakisha\", \n \"latoya\", \"tamika\", \"tanisha\"]\nattribute_words[\"7_a\"] = [\"caress\", \"freedom\", \"health\", \"love\", \"peace\",\n \"cheer\", \"friend\", \"heaven\", \"loyal\", \"pleasure\", \n \"diamond\", \"gentle\", \"honest\", \"lucky\", \"rainbow\",\n \"diploma\", \"gift\", \"honor\", \"miracle\", \"sunrise\",\n \"family\", \"happy\",\"laughter\",\"paradise\", \"vacation\"] \n\nattribute_words[\"7_b\"] = [\"abuse\", \"crash\", \"filth\", \"murder\", \"sickness\",\n \"accident\", \"death\", \"grief\", \"poison\", \"stink\", \n \"assault\", \"disaster\", \"hatred\",\"pollute\", \"tragedy\", \n \"bomb\", \"divorce\", \"jail\", \"poverty\", \"ugly\", \"cancer\",\n \"evil\", \"kill\", \"rotten\",\"vomit\"]\nweats_name.append(\"African - 2\")\n\n\n\n\n\n\ndef statistic_test(X,Y,A,B,M):\n result = 0.0\n sum_X = 0.0\n sum_Y = 0.0\n\n for word_X in X:\n sum_X += sub_statistic_test(word_X, A,B,M)\n for word_Y in Y:\n sum_Y += sub_statistic_test(word_Y, A,B,M)\n\n return (sum_X - sum_Y)\n\ndef sub_statistic_test(w,A,B,M):\n result = 0.0\n sum_cos_A = 0.0\n sum_cos_B = 0.0\n\n for word_A in A:\n sum_cos_A += numpy.dot(M[w],M[word_A])/(numpy.linalg.norm(M[w])*numpy.linalg.norm(M[word_A]))\n for word_B in B:\n sum_cos_B += numpy.dot(M[w],M[word_B])/(numpy.linalg.norm(M[w])*numpy.linalg.norm(M[word_B]))\n\n return (sum_cos_A/len(A) - sum_cos_B/len(B))\n\ndef effect_size(x_words,y_words,a_attributes,b_attributes,M):\n # Effect size\n test_x = 0.0\n test_y = 0.0\n samples = []\n\n for word_x in target_words[x_words]:\n test_x += sub_statistic_test(word_x,attribute_words[a_attributes],attribute_words[b_attributes],M)\n samples.append(sub_statistic_test(word_x,attribute_words[a_attributes],attribute_words[b_attributes],M))\n\n for word_y in target_words[y_words]:\n test_y += sub_statistic_test(word_y,attribute_words[a_attributes],attribute_words[b_attributes],M)\n samples.append(sub_statistic_test(word_y,attribute_words[a_attributes],attribute_words[b_attributes],M))\n\n mean_x = test_x/len(target_words[x_words])\n mean_y = test_y/len(target_words[y_words])\n\n std_dev = numpy.std(samples)\n effect_size = (mean_x - mean_y)/std_dev\n return effect_size\n\n\n# P-Value\ndef p_value(X,Y,A,B,model):\n null_hipotese_evidance = 0.0\n number_permitations = 0.0\n\n # Finds the biggest possible set of the same size for the two classes\n X_size = len(target_words[X])\n Y_size = len(target_words[Y])\n size = max(X_size, Y_size)\n union = set(target_words[X] + target_words[Y])\n random_test_statistic_values = []\n test_statistic_value = statistic_test(target_words[X],target_words[Y],attribute_words[A],attribute_words[B],model)\n\n if (Y_size + X_size) < 14:\n # there will be less than 5000 combinations\n permutations = itertools.combinations(union,size)\n\n for i,permutation in enumerate(permutations):\n x_i = permutation\n y_i = union - set(permutation)\n test_value = statistic_test(x_i,y_i,attribute_words[A],attribute_words[B],model)\n\n random_test_statistic_values.append(test_value)\n if( test_value > test_statistic_value):\n null_hipotese_evidance += 1\n number_permitations += 1\n\n #print(\"null hipotese_evidance: \" + str(null_hipotese_evidance))\n #print(\"num_permutations: \" + str(number_permitations))\n #print(\"P-Value():\")\n #print(null_hipotese_evidance/number_permitations)\n p_value_result = null_hipotese_evidance/number_permitations\n #print(\"enviando \" 
+ str(p_value_result))\n return(p_value_result)\n\n else:\n # There will be more than 5000, thus we should randomize\n print(\"Generating 5k random\")\n classes = target_words[X] + target_words[Y]\n\n for i in range(5000):\n random.shuffle(classes)\n x_i = classes[:size]\n y_i = classes[size+1:]\n test_value = statistic_test(x_i,y_i,attribute_words[A],attribute_words[B],model)\n # save the valus to be used for each channel\n random_test_statistic_values.append(test_value)\n if( test_value > test_statistic_value):\n null_hipotese_evidance += 1\n number_permitations += 1\n #if number_permitations % 100 == 0:\n # print(number_permitations)\n\n #print(\"null hipotese_evidance: \" + str(null_hipotese_evidance))\n #print(\"num_permutations: \" + str(number_permitations))\n #print(\"P-Value(english):\")\n #print(null_hipotese_evidance/number_permitations)\n p_value_result = null_hipotese_evidance/number_permitations\n return(p_value_result)\n\n\n\ndef main():\n\n # Which models to load\n political_biases_model = [\"left\", \"leftcenter\", \"center\", \"right-center\", \"right\"]\n model_types = [ \"captions\", \"comments\"]\n\n \n # list of WEATs to execute\n weats = [1,2,3]\n \n with open(\"../data/weat/weat_results.csv\", \"w\") as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n writer.writerow([\"channel\",\"WEAT\",\"political_bias\", \"source\", \"effect_size\", \"p_value\"])\n \n #for political_bias in political_biases_model:\n # for model_type in model_types: \n\n # for file in os.listdir(\"../models/biases/\" + model_type + \"/\" + political_bias):\n # if file.endswith(\".model\"):\n # print(\"Loading \" + political_bias + \" word2vec \" + model_type + \" model \" + \"(\" + file + \")\") \n # model = gensim.models.Word2Vec.load(\"../models/biases/\" + model_type + \"/\" + political_bias+ \"/\" + file)\n # #model = gensim.models.Word2Vec.load(\"../models/wiki-word2vec/wiki-en.word2vec.model\")\n # print(\"Executing WEATs on current model\" )\n # for weat_number in weats:\n # X = str(weat_number) + \"_a\"\n # Y = str(weat_number) + \"_b\"\n # A = str(weat_number) + \"_a\"\n # B = str(weat_number) + \"_b\"\n # ## Effect size of the base model\n # effect_size_result = effect_size(X,Y,A,B,model)\n # print(\"Effect-Size(\"+str(weat_number)+ \"):\" + str(effect_size_result))\n # p_value_result = p_value(X,Y,A,B,model)\n # print(\"P-value(\"+str(weat_number)+ \"):\" + str(p_value_result))\n # writer.writerow([file[:-6],weats_name[weat_number -1],political_bias , model_type, effect_size_result, p_value_result])\n\n # Add the baseline weat results the wikipedia model\n print(\"Loading the wiki base model\")\n model = gensim.models.Word2Vec.load(\"../models/wiki-word2vec/wiki-en.word2vec.model\")\n print(\"Executing WEATs on current model\" )\n for weat_number in weats:\n X = str(weat_number) + \"_a\"\n Y = str(weat_number) + \"_b\"\n A = str(weat_number) + \"_a\"\n B = str(weat_number) + \"_b\"\n ## Effect size of the base model\n effect_size_result = effect_size(X,Y,A,B,model)\n print(\"Effect-Size(\"+str(weat_number)+ \"):\" + str(effect_size_result))\n p_value_result = p_value(X,Y,A,B,model)\n print(\"P-value(\"+str(weat_number)+ \"):\" + str(p_value_result))\n writer.writerow([\"wikipedia\",weats_name[weat_number -1], \"wiki\", \"wiki\", effect_size_result, p_value_result])\n\n \n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.std",
"numpy.dot",
"numpy.linalg.norm"
]
] |
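The WEAT machinery above rests on one primitive: the association of a word with two attribute sets, s(w, A, B) = mean cosine(w, a) - mean cosine(w, b); `effect_size` then standardizes the difference of mean associations. The primitive in miniature, with random vectors standing in for word embeddings:

```python
import numpy as np

def cosine(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

def association(w, A, B):
    return np.mean([cosine(w, a) for a in A]) - np.mean([cosine(w, b) for b in B])

rng = np.random.default_rng(0)
w = rng.normal(size=50)
A = rng.normal(size=(4, 50)) + 0.5 * w  # attribute vectors correlated with w
B = rng.normal(size=(4, 50))            # unrelated attribute vectors
print(association(w, A, B) > 0)         # True: w associates with A
```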
imisi-akande/disaster-response-pipeline | [
"d691e643c57e45b226ca3cb2c0b4a708c7edfe8b"
] | [
"app/run.py"
] | [
"import json\nimport plotly\nimport pandas as pd\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nimport joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\nclass StartingVerbExtractor(BaseEstimator, TransformerMixin):\n \n def starting_verb(self, text):\n sentence_list = nltk.sent_tokenize(text)\n for sentence in sentence_list:\n pos_tags = nltk.pos_tag(tokenize(sentence))\n first_word, first_tag = pos_tags[0]\n if first_tag in ['VB', 'VBP'] or first_word == 'RT':\n return True\n return False\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_tagged = pd.Series(X).apply(self.starting_verb)\n return pd.DataFrame(X_tagged)\n\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///../data/disaster_response.db')\ndf = pd.read_sql_table('disaster_response_table', engine)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\[email protected]('/')\[email protected]('/index')\ndef index():\n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_percent = round(100*genre_counts/genre_counts.sum(), 2)\n genre_names = list(genre_counts.index)\n\n category_names = df.iloc[:,4:].columns\n category_boolean = (df.iloc[:,4:] != 0).sum().values\n\n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graphs = [\n # GRAPH 1 - genre graph\n {\n \"data\": [\n {\n \"type\": \"pie\",\n \"uid\": \"f4de1f\",\n \"hole\": 0.4,\n \"name\": \"Genre\",\n \"pull\": 0,\n \"domain\": {\n \"x\": genre_percent,\n \"y\": genre_names\n },\n \"marker\": {\n \"colors\": [\n \"#7fc97f\",\n \"#bc5090\",\n \"#ffa600\"\n ]\n },\n \"textinfo\": \"label+value\",\n \"hoverinfo\": \"all\",\n \"labels\": genre_names,\n \"values\": genre_counts\n }\n ],\n \"layout\": {\n \"title\": \"Count and Percentage of Messages by Genre\"\n }\n },\n # GRAPH 2 - category graph\n {\n 'data': [\n Bar(\n x=category_names,\n y=category_boolean\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Categories',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Category\",\n 'tickangle': 35\n }\n }\n }\n ]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n# web page that handles user query and displays model results\[email protected]('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '')\n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html 
Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=5000, debug=True)\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"pandas.read_sql_table",
"pandas.Series",
"pandas.DataFrame"
]
] |
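`tokenize` above (word-tokenize, lemmatize, lowercase, strip) is what both the model pipeline and `StartingVerbExtractor` rely on. A minimal standalone run, assuming the NLTK 'punkt' and 'wordnet' data packages are available (newer NLTK releases may also want 'punkt_tab' and 'omw-1.4'):

```python
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

nltk.download("punkt", quiet=True)    # tokenizer models
nltk.download("wordnet", quiet=True)  # lemmatizer data

lemmatizer = WordNetLemmatizer()
tokens = [lemmatizer.lemmatize(tok).lower().strip()
          for tok in word_tokenize("rivers are flooding the streets")]
print(tokens)  # e.g. ['river', 'are', 'flooding', 'the', 'street']
```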
ess-dmsc/JustBinIt | [
"dc8242ed44f03e92f60618c96596025ec8cbc40e"
] | [
"tests/test_histogram2d.py"
] | [
"import numpy as np\nimport pytest\n\nfrom just_bin_it.histograms.histogram2d import Histogram2d\n\nIRRELEVANT_TOPIC = \"some-topic\"\n\n\nclass TestHistogram2dFunctionality:\n @pytest.fixture(autouse=True)\n def prepare(self):\n self.pulse_time = 1234\n self.num_bins = (5, 10)\n self.tof_range = (0, 10)\n self.det_range = (0, 5)\n self.data = np.array([x for x in range(self.num_bins[0])])\n self.hist = Histogram2d(\"topic\", self.num_bins, self.tof_range, self.det_range)\n\n def test_if_single_value_for_num_bins_then_value_used_for_both_x_and_y(self):\n num_bins = 5\n hist = Histogram2d(\"topic\", num_bins, self.tof_range, self.det_range)\n assert len(hist.x_edges) == num_bins + 1\n assert len(hist.y_edges) == num_bins + 1\n assert hist.shape == (num_bins, num_bins)\n\n def test_on_construction_histogram_is_uninitialised(self):\n assert self.hist.x_edges is not None\n assert self.hist.y_edges is not None\n assert self.hist.shape == self.num_bins\n assert len(self.hist.x_edges) == self.num_bins[0] + 1\n assert len(self.hist.y_edges) == self.num_bins[1] + 1\n assert self.hist.x_edges[0] == self.data[0]\n assert self.hist.x_edges[-1] == 10\n assert self.hist.y_edges[0] == self.data[0]\n assert self.hist.y_edges[-1] == 5\n assert self.hist.data.sum() == 0\n\n def test_adding_data_to_initialised_histogram_new_data_is_added(self):\n self.hist.add_data(self.pulse_time, self.data, self.data)\n first_sum = self.hist.data.sum()\n\n # Add the data again\n self.hist.add_data(self.pulse_time, self.data, self.data)\n\n # Sum should be double\n assert self.hist.data.sum() == first_sum * 2\n\n def test_adding_data_outside_initial_bins_is_ignored(self):\n self.hist.add_data(self.pulse_time, self.data, self.data)\n first_sum = self.hist.data.sum()\n x_edges = self.hist.x_edges[:]\n y_edges = self.hist.y_edges[:]\n\n # Add data that is outside the edges\n new_data = np.array([x + self.num_bins[0] + 1 for x in range(self.num_bins[0])])\n self.hist.add_data(self.pulse_time, new_data, new_data)\n\n # Sum should not change\n assert self.hist.data.sum() == first_sum\n # Edges should not change\n assert np.array_equal(self.hist.x_edges, x_edges)\n assert np.array_equal(self.hist.y_edges, y_edges)\n\n def test_if_no_id_supplied_then_defaults_to_empty_string(self):\n assert self.hist.identifier == \"\"\n\n def test_id_supplied_then_is_set(self):\n example_id = \"abcdef\"\n hist = Histogram2d(\n \"topic1\",\n self.num_bins,\n self.tof_range,\n self.det_range,\n identifier=example_id,\n )\n assert hist.identifier == example_id\n\n def test_only_data_with_correct_source_is_added(self):\n hist = Histogram2d(\n \"topic\", self.num_bins, self.tof_range, self.det_range, source=\"source1\"\n )\n\n hist.add_data(self.pulse_time, self.data, self.data, source=\"source1\")\n hist.add_data(self.pulse_time, self.data, self.data, source=\"source1\")\n hist.add_data(self.pulse_time, self.data, self.data, source=\"OTHER\")\n\n assert hist.data.sum() == 10\n\n def test_clearing_histogram_data_clears_histogram(self):\n self.hist.add_data(self.pulse_time, self.data, self.data)\n\n self.hist.clear_data()\n\n assert self.hist.data.sum() == 0\n\n def test_after_clearing_histogram_can_add_data(self):\n self.hist.add_data(self.pulse_time, self.data, self.data)\n self.hist.clear_data()\n\n self.hist.add_data(self.pulse_time, self.data, self.data)\n\n assert self.hist.shape == self.num_bins\n assert self.hist.data.sum() == 5\n\n def test_adding_empty_data_does_nothing(self):\n self.hist.add_data(self.pulse_time, [], [])\n\n assert 
self.hist.data.sum() == 0\n\n def test_histogram_keeps_track_of_last_pulse_time_processed(self):\n self.hist.add_data(1234, self.data, self.data)\n self.hist.add_data(1235, self.data, self.data)\n self.hist.add_data(1236, self.data, self.data)\n\n assert self.hist.last_pulse_time == 1236\n"
] | [
[
"numpy.array_equal"
]
] |
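The behaviour pinned down by `test_adding_data_outside_initial_bins_is_ignored` matches the semantics of NumPy's `histogram2d`, which `Histogram2d` plausibly wraps: with fixed edges, samples outside the range are dropped rather than clipped. A quick check of that semantics with plain NumPy:

```python
import numpy as np

x = y = np.arange(5)  # five in-range samples
counts, x_edges, y_edges = np.histogram2d(
    x, y, bins=(5, 10), range=((0, 10), (0, 5)))
print(counts.sum())   # 5.0 -- everything landed in the histogram

outside = x + 11      # beyond both the tof and det ranges
counts2, _, _ = np.histogram2d(outside, outside, bins=(x_edges, y_edges))
print(counts2.sum())  # 0.0 -- out-of-range samples are silently dropped
```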
MGH-LMIC/CXR-autolabeling | [
"74eac30bb6eaa6c1d5a8b343743024ef6bd9db7d"
] | [
"autolabeling.py"
] | [
"import re\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mpl_color_map\n\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom prettytable import PrettyTable\nfrom scipy.ndimage import gaussian_filter\nfrom sklearn.metrics import roc_curve, precision_recall_curve\n\nimport torch\nimport torchnet as tnt\nimport torch.nn.functional as F\n\nfrom utils import logger\nfrom environment import TestEnvironment, initialize, print_label_name\nfrom gradcam import GradCam, save_class_activation_images\nfrom data import CxrDataset, EXT_DATA_BASE\nfrom atlasmethod import EX_AI\n\nimport time\n\nATLAS_GEN = False\natlas_name = 'cardiomegaly'\n# 'cardiomegaly', 'atelectasis', 'pulmonary_edema', 'pneumonia', 'pleural_effusion'\n\nclass Tester:\n def __init__(self, env, pt_runtime=\"test\", fn_net=None, fl_gradcam=False, cls_gradcam=None, id_prob=None, fl_ensemble=False, fl_exai=False, f_name='sim', f_csv=None):\n self.env = env\n self.pt_runtime = pt_runtime\n self.fl_prob = False if id_prob == None else True\n self.id_prob = id_prob\n self.f_name = f_name\n self.fl_ensemble = fl_ensemble\n # for multiple class and binary label tasks\n self.pf_metric = {\n 'loss': [],\n 'accuracy': [],\n 'sensitivity': [],\n 'specificity': [],\n 'auc_score': [],\n 'ap_score': [],\n 'mse_score': []\n }\n self.fn_net = fn_net\n self.fl_gradcam = fl_gradcam\n self.cls_gradcam = cls_gradcam\n self.th_gradcam = 0.5\n self.fl_gradcam_save = True\n\n #explainable methods\n self.fl_exai = fl_exai\n if self.fl_exai:\n self.fl_gradcam = True\n self.cls_gradcam = [\n 'Hilar/mediastinum>Cardiomegaly>.',\n 'Lung density>Increased lung density>Atelectasis',\n 'Lung density>Increased lung density>Pulmonary edema',\n 'Lung density>Increased lung density>pneumonia',\n 'Pleura>Pleural effusion>.'\n ]\n self.th_gradcam = 0.5\n self.ex_method = EX_AI(env, pt_runtime=pt_runtime, thr=0.5, f_name=f_name, ext_data_csv=f_csv)\n\n def load(self):\n pt_file = self.pt_runtime.joinpath(f'train.pkl')\n with open(pt_file, 'rb') as f:\n self.pf_metric = pickle.load(f)\n\n def test_evaluation(self, epoch=1, fl_save=False):\n if self.fn_net == None:\n pt_model = self.pt_runtime.joinpath(f'model_epoch_{epoch:04d}.pth.tar')\n else:\n pt_model = self.pt_runtime.joinpath(str(self.fn_net))\n\n self.env.load_model(pt_model)\n\n try:\n self.load()\n except:\n logger.debug('there is no pkl to load.')\n\n _, _, _ = self.test(epoch, self.env.test_loader, fl_save=fl_save)\n\n if False:\n self.algorithm_attribution(self.env.gradcam_loader)\n\n if self.fl_gradcam:\n _, _, _ = self.gradcam_data(self.env.gradcam_loader)\n\n\n def test_ensemble_evaluation(self, epoch=1, fl_save=False, n_ens=1):\n\n predict = []\n target = []\n\n if self.fl_gradcam:\n cams = np.ones((len(self.env.gradcam_loader), len(self.cls_gradcam), 16, 16))\n\n if ATLAS_GEN:\n gradcam_df = pd.DataFrame(columns=[f'{x:03d}' for x in range(256)])\n\n for k in range(n_ens):\n pt_model = self.pt_runtime.joinpath(str(self.fn_net)+f'_{k:02d}.pth.tar')\n self.env.load_model(pt_model)\n\n #logger.info(f'network to test: {self.env.model}')\n try:\n self.load()\n except:\n logger.debug('there is no pkl to load.')\n\n _, pred, tar = self.test(epoch, self.env.test_loader, fl_save=False)\n\n predict.append(pred)\n target.append(tar)\n\n # evaluate ensemble's performance\n prob_ens = self.ensemble_performance(predict, target, n_ens, fl_save=fl_save)\n\n if self.fl_exai:\n prob_in = pd.DataFrame(prob_ens.cpu().numpy()[:,1:])\n prob_in['PATH'] = 
self.env.test_loader.dataset.entries['PATH']\n self.ex_method.input_preparation(prob_in)\n\n if self.fl_gradcam:\n cams = np.ones((len(self.env.gradcam_loader), len(self.cls_gradcam), 16, 16))\n for k in range(n_ens):\n pt_model = self.pt_runtime.joinpath(str(self.fn_net)+f'_{k:02d}.pth.tar')\n self.env.load_model(pt_model)\n\n start = time.time()\n _, _, cam = self.gradcam_data(self.env.gradcam_loader, prob_ens=prob_ens)\n #review_cam\n #cams *= cam\n cams += cam\n end = time.time()\n print(f'{k:02d} model gradcam time: {end-start} sec')\n\n _, _, cams = self.gradcam_data(self.env.gradcam_loader, ens_flg=True, cams_ens=cams, prob_ens=prob_ens)\n\n if self.fl_exai:\n start = time.time()\n self.ex_method.run(cams)\n end = time.time()\n print(f'self-annotation time: {end-start} sec')\n\n if ATLAS_GEN:\n for k in range(len(self.env.gradcam_loader)):\n gradcam_df.loc[k] = cams[k].flatten()\n print(f\"[{atlas_name}]Atlas generation: {k:5d}\")\n\n gradcam_df['PATH'] = self.env.gradcam_loader.dataset.entries['PATH']\n gradcam_df.to_csv(self.pt_runtime.joinpath(f'gradcam_atlas_{atlas_name}.csv'), index=False)\n\n\n def ensemble_performance(self, predict, target, n_ens, fl_save=False):\n\n pred_ens = torch.zeros(predict[0].shape).to(self.env.device)\n #pred_ens = np.zeros(predict[0].shape)\n for i in range(n_ens):\n pred_ens += torch.from_numpy(predict[i]).to(self.env.device)\n\n pred_ens /= n_ens\n targ_ens = torch.from_numpy(target[0]).to(self.env.device)\n\n aucs, aps = self.AUC_AP_metric(pred_ens, targ_ens)\n correct, total = self.ACC_metric(pred_ens, targ_ens)\n self.Per_print(correct=correct, total=total, aucs=aucs, aps=aps)\n\n if fl_save:\n test_set = self.env.test_loader.dataset\n labels = self.env.labels\n self.roc_evaluation(test_set, pred_ens, targ_ens, labels)\n\n return pred_ens\n\n\n def AUC_AP_metric(self, output, target):\n out_dim = output.shape[1]\n aucs = [tnt.meter.AUCMeter() for i in range(out_dim)]\n aps = [tnt.meter.APMeter() for i in range(out_dim)]\n\n for i in range(out_dim):\n mask_out, mask_tar = self.mask_pred(output[:, i], target[:, i])\n try:\n aucs[i].add(mask_out, mask_tar)\n aps[i].add(mask_out, mask_tar)\n except:\n continue\n\n return aucs, aps\n\n def MSE__metric(self, output, target):\n out_dim = 1\n mses = [tnt.meter.MSEMeter() for i in range(out_dim)]\n\n mses[0].add(output[:, -1], target[:, -1])\n\n return mses\n\n def ACC_metric(self, output, target):\n mask_out, mask_tar = self.mask_pred(output, target)\n\n ones = torch.ones(mask_out.shape).int().to(self.env.device)\n zeros = torch.zeros(mask_out.shape).int().to(self.env.device)\n\n pred = torch.where(mask_out > 0.5, ones, zeros)\n correct = pred.eq(mask_tar.int()).sum().item()\n total = len(mask_tar)\n\n return correct, total\n\n\n def Per_print(self, correct=None, total=None, aucs=None, aps=None, mses=None):\n labels = self.env.labels\n\n out_dim = len(aucs)\n\n percent = 100. 
* correct / total\n\n        logger.info(f\"accuracy {correct}/{total} \"\n                    f\"({percent:.2f}%)\")\n\n        p = PrettyTable()\n        p.field_names = [\"findings\", \"auroc score\", \"ap score\"]\n        auc_cnt = out_dim\n        for i in range(out_dim):\n            try:\n                #p.add_row([labels[i], f\"{aucs[i].value()[0]:.4f}\", f\"{aps[i].value()[0]:.4f}\"])\n                p.add_row([f'E-{labels[i]}', f\"{aucs[i].value()[0]:.4f}\", f\"{aps[i].value()[0]:.4f}\"])\n            except:\n                p.add_row([labels[i], \"-\", \"-\"])\n\n        try:\n            list_aucs=[]\n            for k in aucs:\n                if type(k.value()) == tuple:\n                    if np.isnan(k.value()[0]) == False:\n                        list_aucs.append(k.value()[0])\n\n            list_aps=[]\n            for k in aps:\n                if type(k.value()) == torch.Tensor:\n                    if np.isnan(k.value()[0]) == False:\n                        list_aps.append(k.value()[0])\n\n            ave_auc = np.mean(list_aucs)\n            ave_ap = np.mean(list_aps)\n            tbl_str = p.get_string(title=f\"Ensemble-performance (avg auc {ave_auc:.4f}, mean ap {ave_ap:.4f})\")\n            logger.info(f\"\\n{tbl_str}\")\n        except:\n            print(\"We cannot calculate average auc scores\")\n            ave_auc = 0\n            ave_ap = 0\n\n\n    def test(self, epoch, test_loader, fl_save=False):\n        test_set = test_loader.dataset\n        out_dim = self.env.out_dim\n        labels = self.env.labels\n\n        aucs = [tnt.meter.AUCMeter() for i in range(out_dim)]\n        aps = [tnt.meter.APMeter() for i in range(out_dim)]\n\n        CxrDataset.eval()\n        self.env.model.eval()\n\n        with torch.no_grad():\n            correct = 0\n            total = 0\n\n            predict_seq = torch.FloatTensor().to(self.env.device)\n            target_seq = torch.FloatTensor().to(self.env.device)\n\n            tqdm_desc = f'testing '\n            t = tqdm(enumerate(test_loader), total=len(test_loader), desc=tqdm_desc,\n                     dynamic_ncols=True)\n\n            for bt_idx, tp_data in t:\n                output, target = self.test_batch(tp_data)\n\n                # Network outputs\n                predict_seq = torch.cat((predict_seq, F.sigmoid(output)), dim=0)\n                target_seq = torch.cat((target_seq, target), dim=0)\n\n                for i in range(out_dim):\n                    mask_out, mask_tar = self.mask_pred(output[:, i], target[:, i])\n                    try:\n                        aucs[i].add(mask_out, mask_tar)\n                        aps[i].add(mask_out, mask_tar)\n                    except:\n                        continue\n\n                mask_out, mask_tar = self.mask_pred(output, target)\n\n                ones = torch.ones(mask_out.shape).int().to(self.env.device)\n                zeros = torch.zeros(mask_out.shape).int().to(self.env.device)\n\n                pred = torch.where(mask_out > 0., ones, zeros)\n                correct += pred.eq(mask_tar.int()).sum().item()\n                total += len(mask_tar)\n                #pred = torch.where(output > 0., ones, zeros)\n                #correct += pred.eq(target.int()).sum().item()\n\n            #total = len(test_loader.sampler) * out_dim\n            percent = 100. 
* correct / total\n\n            logger.info(f\"val epoch {epoch:03d}: \"\n                        f\"accuracy {correct}/{total} \"\n                        f\"({percent:.2f}%)\")\n\n            p = PrettyTable()\n            p.field_names = [\"findings\", \"auroc score\", \"ap score\"]\n            auc_cnt = out_dim\n            for i in range(out_dim):\n                try:\n                    p.add_row([labels[i], f\"{aucs[i].value()[0]:.4f}\", f\"{aps[i].value()[0]:.4f}\"])\n                except:\n                    p.add_row([labels[i], \"-\", \"-\"])\n\n            if fl_save:\n                self.roc_evaluation(test_set, predict_seq, target_seq, labels)\n\n            if self.fl_prob:\n                self.df_prob = pd.DataFrame()\n                self.df_prob['PATH_CHECK'] = test_set.entries['PATH']\n                self.df_prob['PROB'] = predict_seq.cpu().numpy()[:, self.id_prob]\n\n            try:\n                list_aucs=[]\n                for k in aucs:\n                    if type(k.value()) == tuple:\n                        if np.isnan(k.value()[0]) == False:\n                            list_aucs.append(k.value()[0])\n\n                list_aps=[]\n                for k in aps:\n                    if type(k.value()) == torch.Tensor:\n                        if np.isnan(k.value()[0]) == False:\n                            list_aps.append(k.value()[0])\n\n                ave_auc = np.mean(list_aucs)\n                ave_ap = np.mean(list_aps)\n\n                tbl_str = p.get_string(title=f\"performance (avg auc {ave_auc:.4f}, mean ap {ave_ap:.4f})\")\n                logger.info(f\"\\n{tbl_str}\")\n            except:\n                print(\"We cannot calculate average auc scores\")\n                ave_auc = 0\n                ave_ap = 0\n\n        self.pf_metric[f'accuracy'].append((epoch, correct / total))\n        self.pf_metric[f'auc_score'].append((epoch, ave_auc))\n        self.pf_metric[f'ap_score'].append((epoch, ave_ap))\n\n        return ave_auc, predict_seq.cpu().numpy(), target_seq.cpu().numpy()\n\n    def mask_pred(self, output, target):\n        mask_one = torch.ones(output.shape, dtype=torch.uint8, device=self.env.device)\n        mask_zero = torch.zeros(output.shape, dtype=torch.uint8, device=self.env.device)\n\n        #mask = torch.where(target == -1, mask_zero, mask_one)\n        mask = torch.where(target == -1, mask_zero, mask_one).bool()\n        mask_output = output.masked_select(mask.to(self.env.device))\n        mask_target = target.masked_select(mask.to(self.env.device))\n\n        return mask_output, mask_target\n\n    def test_batch(self, tp_data, fl_input=False):\n        # to support different types of models.\n        if self.env.type == 0:\n            data = tp_data[0]\n            target = tp_data[1]\n            info = tp_data[2]\n            data, target, info = data.to(self.env.device), target.to(self.env.device), info.to(self.env.device)\n            #data, target = data.to(self.env.device), target.to(self.env.device)\n            #network output\n            output = self.env.model(data)\n        elif self.env.type == 1:\n            data1 = tp_data[0]\n            data2 = tp_data[1]\n            target = tp_data[2]\n            data1, data2, target = data1.to(self.env.device), data2.to(self.env.device), target.to(self.env.device)\n            #network output\n            output = self.env.model(data1, data2)\n        elif self.env.type == 3:\n            data = tp_data[0]\n            target = tp_data[1]\n            info = tp_data[2]\n            data, target, info = data.to(self.env.device), target.to(self.env.device), info.to(self.env.device)\n            #network output\n            output = self.env.model(data, info)\n\n        if fl_input == False:\n            return output, target\n        else:\n            return data, info, output\n\n\n    def gradcam_data(self, test_loader, hmp_dims=(512,512), ens_flg=False, cams_ens=None, prob_ens=None):\n        # threshold to draw a heatmap\n        out_dim = self.env.out_dim\n\n        CxrDataset.eval()\n        self.env.model.eval()\n        #with torch.no_grad():\n        gradcam_res_list = []\n        gradcam_path_list = []\n\n        cams = np.zeros((len(test_loader), len(self.cls_gradcam), 16, 16))\n\n        grad_cam = GradCam(self.env.model, self.env.type)\n        for batch_idx, (data, target, info) in enumerate(test_loader):\n            #data, target = data.to(self.env.device), target.to(self.env.device)\n            data, target, info = data.to(self.env.device), 
target.to(self.env.device), info.to(self.env.device)\n # Grad CAM\n #grad_cam = GradCam(self.env.model, self.env.type)\n if self.cls_gradcam == None:\n gradcam_res, gradcam_path = self.gradcam_save_maxcls(grad_cam, data, test_loader, batch_idx, hmp_dims, info)\n else:\n if self.fl_ensemble:\n cam = self.gradcam_save_argcls_ens(grad_cam, data, test_loader, batch_idx, hmp_dims, info, ens_flg=ens_flg, cams_ens=cams_ens, prob_ens=prob_ens)\n else:\n gradcam_res, gradcam_path = self.gradcam_save_argcls(grad_cam, data, test_loader, batch_idx, hmp_dims, info)\n\n try:\n if self.fl_ensemble:\n cams[batch_idx, :, :, :] = cam\n else:\n gradcam_res_list.append(gradcam_res.tolist())\n gradcam_path_list.append(gradcam_path)\n\n except AttributeError as e:\n print(\"No GradCam result?\")\n\n if False:\n self.gradcam_thumbnail()\n\n\n return gradcam_res_list, gradcam_path_list, cams\n\n def gradcam_save_maxcls(self, grad_cam, data, test_loader, batch_idx, hmp_dims, info):\n if self.env.type == 3:\n cam, prob, tcls = grad_cam.generate_cam(data, info)\n else:\n cam, prob, tcls = grad_cam.generate_cam(data)\n\n noPlotflg = np.array([-1])\n # when we draw gradcam, we have to batch size as 1.\n file_name = test_loader.dataset.entries['PATH'][batch_idx]\n path_name = file_name.split(\".\")[0]\n\n if prob >= self.th_gradcam:\n target_class = self.env.labels[tcls]\n label_list = re.split(' \\- |\\/| ', target_class)\n label_name = \"_\".join(label_list)\n path_name = \"_\".join([path_name, label_name])\n\n cam_rs = save_class_activation_images(data, cam, self.pt_runtime.joinpath(f\"gradcam_image\"), path_name, hmp_dims)\n return cam_rs, path_name\n else:\n cam_rs = save_class_activation_images(data, noPlotflg, self.pt_runtime.joinpath(\"gradcam_image\"), path_name, hmp_dims)\n return None, None\n\n def gradcam_save_argcls(self, grad_cam, data, test_loader, batch_idx, hmp_dims, info):\n\n if self.cls_gradcam[0] == 'all':\n self.cls_gradcam = self.env.labels\n\n for i, nm_tcls in enumerate(self.cls_gradcam):\n ## need to implement to find index among self.env.labels from string of target class\n ## code start here!!!!\n id_tcls = self.env.labels.index(nm_tcls)\n if self.env.type == 3:\n cam, prob, tcls = grad_cam.generate_cam(data, info, target_class=id_tcls)\n else:\n cam_w = self.env.model.module.main.classifier.weight[id_tcls].cpu().detach().numpy()\n cam, prob, tcls, _ = grad_cam.generate_cam(data, target_class=id_tcls, cam_w=cam_w)\n noPlotflg = np.array([-1])\n # when we draw gradcam, we have to batch size as 1.\n file_name = test_loader.dataset.entries['PATH'][batch_idx]\n path_name = file_name.split(\".\")[0]\n\n target_class = self.env.labels[tcls]\n label_list = re.split(' \\- |\\/| ', target_class)\n label_name = \"_\".join(label_list)\n label_name = label_name.strip('>.').split('>')[-1]\n #path_name = \"_\".join([f'{int(prob*1000):04d}', path_name, label_name])\n\n if prob >= self.th_gradcam:\n cam_rs = save_class_activation_images(data, cam, self.pt_runtime.joinpath(f\"gradcam_image_{label_name}\"), path_name, hmp_dims)\n\n cam_list=[]\n path_list=[]\n\n path_list.append(path_name)\n return cam_list, path_list\n\n def gradcam_save_argcls_ens(self, grad_cam, data, test_loader, batch_idx, hmp_dims, info, ens_flg=False, cams_ens=None, prob_ens=None):\n\n if self.cls_gradcam[0] == 'all':\n self.cls_gradcam = self.env.labels\n\n cams = np.zeros((len(self.cls_gradcam), 16, 16))\n for i, nm_tcls in enumerate(self.cls_gradcam):\n ## need to implement to find index among self.env.labels from string of target 
class\n ## code start here!!!!\n id_tcls = self.env.labels.index(nm_tcls)\n cam_w = self.env.model.module.main.classifier.weight[id_tcls].cpu().detach().numpy()\n if prob_ens[batch_idx, id_tcls].item() >= self.th_gradcam:\n if ens_flg == True:\n cam, prob, tcls, cam_low = grad_cam.generate_cam(data, target_class=id_tcls, cam_w=cam_w, ens_flg=True, ens_cam=cams_ens[batch_idx, i, :, :])\n cams[i, :, :] = cam_low\n\n noPlotflg = np.array([-1])\n # when we draw gradcam, we have to batch size as 1.\n file_name = test_loader.dataset.entries['PATH'][batch_idx]\n path_name = file_name.split(\".\")[0]\n\n label_name = print_label_name[tcls]\n\n if ATLAS_GEN:\n label_name = f\"ATLAS_{atlas_name}\"\n\n #if prob_ens[batch_idx, id_tcls].item() >= self.th_gradcam:\n if ATLAS_GEN:\n cam_rs = save_class_activation_images(data, cam, self.pt_runtime.joinpath(f\"{label_name}\"), path_name, hmp_dims)\n else:\n if '/' in path_name:\n self.pt_runtime.joinpath(f\"explain_sample/{self.f_name}/{label_name}/{path_name}\").parent.mkdir(parents=True, exist_ok=True)\n cam_rs = save_class_activation_images(data, cam, self.pt_runtime.joinpath(f\"explain_sample/{self.f_name}/{label_name}\"), path_name, hmp_dims)\n else:\n #review_cam\n cam, prob, tcls, cam_low = grad_cam.generate_cam(data, target_class=id_tcls, cam_w=cam_w, th_cam=0.5)\n cams[i, :, :] = cam_low\n\n return cams\n\n def roc_evaluation(self, test_set, predict_seq, target_seq, labels):\n out_dim = self.env.out_dim\n df_data = pd.DataFrame()\n df_data['PATH'] = test_set.entries['PATH']\n for i in range(out_dim):\n df_data[f'{labels[i]}'] = predict_seq.cpu().numpy()[:, i]\n df_data[f'{labels[i]}_GT'] = target_seq.cpu().numpy()[:, i]\n\n t = self.pt_runtime.joinpath('roc_result')\n Path.mkdir(t, parents=True, exist_ok=True)\n df_data.to_excel(t.joinpath('save_predicted_probabilities.xlsx'))\n\n roc_dim = out_dim \n for i in range(roc_dim):\n mask_out, mask_tar = self.mask_pred(predict_seq[:, i], target_seq[:, i])\n if mask_tar.cpu().numpy().size != 0 :\n fpr, tpr, thresholds = roc_curve(mask_tar.cpu().numpy(), mask_out.cpu().numpy())\n pre, rec, thresholds_pr = precision_recall_curve(mask_tar.cpu().numpy(), mask_out.cpu().numpy())\n #logger.debug(f\"{predict_seq.cpu().numpy()}\")\n df = pd.DataFrame()\n df[f'specificity'] = 1-fpr\n df[f'sensitivity'] = tpr\n df[f'thresholds'] = thresholds\n\n label_name = print_label_name[i]\n df.to_excel(t.joinpath(f'save_{i:03d}_{label_name}_sensitivity_specificity.xlsx'))\n del df\n\n if False:\n # ROC plot\n fig, (ax1, ax2) = plt.subplots(1,2)\n ax1.plot(fpr, tpr, color = 'darkorange', lw = 2, label = 'ROC curve')\n ax1.set_title(f'ROC curve for {labels[i]}')\n ax1.set(xlabel='False positive rate', ylabel='True positive rate')\n # PR plot\n ax2.plot(rec, pre, color = 'darkorange', lw = 2, label = 'Precision-Recall curve')\n ax2.set_title(f'Precision-Recall curve')\n ax2.set(xlabel='Recall', ylabel='Precision')\n plt.savefig(t.joinpath(f'{i:03d}_{label_name}_curve.png'))\n else:\n # ROC plot\n fig, ax1 = plt.subplots(1,1)\n ax1.plot(fpr, tpr, color = 'darkorange', lw = 2, label = f'{label_name}')\n ax1.set_title(f'ROC curve for {label_name}')\n ax1.set(xlabel='False positive rate', ylabel='True positive rate')\n plt.savefig(t.joinpath(f'{i:03d}_{label_name}_curve.png'))\n\n\n def save_prob(self, input_file, save_path):\n df = pd.read_csv(input_file)\n df.insert(6, 'prob', self.df_prob.PROB)\n df.insert(6, 'path_check', self.df_prob.PATH_CHECK)\n\n df.to_excel(save_path)\n\n\n\nif __name__ == \"__main__\":\n import 
argparse\n\n    parser = argparse.ArgumentParser(description=\"Testing Our Explainable AI Model on CXR\")\n    parser.add_argument('--cuda', default=None, type=str, help=\"use GPUs with their device ids, separated by commas\")\n\n    args = parser.parse_args()\n    args.in_dim = 1\n    args.out_dim = 21\n    args.labels = None\n    args.paths = None\n    args.runtime_dir = 'autolabeling'\n    args.type = 0\n    args.pretr_net = 'pa_feat_model'\n    args.gradcam = False\n    args.gradcam_cls = None\n    args.fl_save = False\n    args.id_prob = None\n    args.test_csv = 'autolabeling_5_features_490_cases.csv'\n    args.arch = None\n    args.Nens = 6\n    args.exai = True\n    args.simname = 'Outputs'\n    args.seed = -1\n\n    runtime_path, device = initialize(args, fl_demo=True)\n    fl_ensemble = False if args.Nens == 1 else True\n\n    # start testing\n    env = TestEnvironment(device, mtype=args.type, in_dim=args.in_dim, out_dim=args.out_dim, name_labels=args.labels, name_paths=args.paths, testset_csv=args.test_csv, name_model=args.arch, r_seed=args.seed)\n    t = Tester(env, pt_runtime=runtime_path, fn_net=args.pretr_net, fl_gradcam=args.gradcam, cls_gradcam=args.gradcam_cls, id_prob=args.id_prob, fl_ensemble=fl_ensemble, fl_exai=args.exai, f_name=args.simname, f_csv=args.test_csv)\n\n    if(fl_ensemble):\n        t.test_ensemble_evaluation(fl_save=args.fl_save, n_ens = args.Nens)\n    else:\n        t.test_evaluation(fl_save=args.fl_save)\n\n"
] | [
[
"torch.ones",
"torch.FloatTensor",
"torch.nn.functional.sigmoid",
"pandas.read_csv",
"pandas.DataFrame",
"torch.no_grad",
"matplotlib.pyplot.subplots",
"torch.where",
"torch.from_numpy",
"torch.zeros",
"numpy.array",
"torch.cat",
"numpy.mean"
]
] |
aureocarneiro/sardana | [
"43644c9966d73c7a9023b53e97b530f3ea0dfb39"
] | [
"src/sardana/macroserver/macros/scan.py"
] | [
"##############################################################################\n##\n# This file is part of Sardana\n##\n# http://www.sardana-controls.org/\n##\n# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain\n##\n# Sardana is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n##\n# Sardana is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n##\n# You should have received a copy of the GNU Lesser General Public License\n# along with Sardana. If not, see <http://www.gnu.org/licenses/>.\n##\n##############################################################################\n\n\"\"\"\n Macro library containning scan macros for the macros server Tango device\n server as part of the Sardana project.\n\"\"\"\n\n__all__ = [\"a2scan\", \"a3scan\", \"a4scan\", \"amultiscan\", \"aNscan\", \"ascan\",\n \"d2scan\", \"d3scan\", \"d4scan\", \"dmultiscan\", \"dNscan\", \"dscan\",\n \"fscan\", \"mesh\", \"timescan\", \"rscan\", \"r2scan\", \"r3scan\",\n \"a2scanc\", \"a3scanc\", \"a4scanc\", \"ascanc\",\n \"d2scanc\", \"d3scanc\", \"d4scanc\", \"dscanc\",\n \"meshc\",\n \"a2scanct\", \"a3scanct\", \"a4scanct\", \"ascanct\", \"meshct\",\n \"scanhist\", \"getCallable\", \"UNCONSTRAINED\",\n \"scanstats\"]\n\n__docformat__ = 'restructuredtext'\n\nimport os\nimport copy\nimport datetime\n\nimport numpy\n\nfrom taurus.core.util import SafeEvaluator\n\nfrom sardana.macroserver.msexception import UnknownEnv\nfrom sardana.macroserver.macro import Hookable, Macro, Type, Table, List\nfrom sardana.macroserver.scan.gscan import SScan, CTScan, HScan, \\\n MoveableDesc, CSScan, TScan\nfrom sardana.util.motion import MotionPath\nfrom sardana.util.tree import BranchNode\n\nUNCONSTRAINED = \"unconstrained\"\n\nStepMode = 's'\n# TODO: change it to be more verbose e.g. ContinuousSwMode\nContinuousMode = 'c'\nContinuousHwTimeMode = 'ct'\nHybridMode = 'h'\n\n\ndef getCallable(repr):\n \"\"\"\n returns a function .\n Ideas: repr could be an URL for a file where the function is contained,\n or be evaluable code, or a pickled function object,...\n\n In any case, the return from it should be a callable of the form:\n f(x1,x2) where x1, x2 are points in the moveable space and the return value\n of f is True if the movement from x1 to x2 is allowed. False otherwise\n \"\"\"\n if repr == UNCONSTRAINED:\n return lambda x1, x2: True\n else:\n return lambda: None\n\n\n# TODO: remove starts\ndef _calculate_positions(moveable_node, start, end):\n '''Function to calculate starting and ending positions on the physical\n motors level.\n :param moveable_node: (BaseNode) node representing a moveable.\n Can be a BranchNode representing a PseudoMotor,\n or a LeafNode representing a PhysicalMotor).\n :param start: (float) starting position of the moveable\n :param end: (float) ending position of the moveable\n\n :return: (list<(float,float)>) a list of tuples comprising starting\n and ending positions. 
List order is important and preserved.'''\n start_positions = []\n end_positions = []\n if isinstance(moveable_node, BranchNode):\n pseudo_node = moveable_node\n moveable = pseudo_node.data\n moveable_nodes = moveable_node.children\n starts = moveable.calcPhysical(start)\n ends = moveable.calcPhysical(end)\n for moveable_node, start, end in zip(moveable_nodes, starts,\n ends):\n _start_positions, _end_positions = _calculate_positions(\n moveable_node,\n start, end)\n start_positions += _start_positions\n end_positions += _end_positions\n else:\n start_positions = [start]\n end_positions = [end]\n\n return start_positions, end_positions\n\n\nclass aNscan(Hookable):\n \"\"\"N-dimensional scan. This is **not** meant to be called by the user,\n but as a generic base to construct ascan, a2scan, a3scan,...\"\"\"\n\n hints = {'scan': 'aNscan', 'allowsHooks': ('pre-scan', 'pre-move',\n 'post-move', 'pre-acq',\n 'post-acq', 'post-step',\n 'post-scan')}\n # env = ('ActiveMntGrp',)\n\n def _prepare(self, motorlist, startlist, endlist, scan_length, integ_time,\n mode=StepMode, latency_time=0, **opts):\n\n self.motors = motorlist\n self.starts = numpy.array(startlist, dtype='d')\n self.finals = numpy.array(endlist, dtype='d')\n self.mode = mode\n self.integ_time = integ_time\n self.opts = opts\n if len(self.motors) == self.starts.size == self.finals.size:\n self.N = self.finals.size\n else:\n raise ValueError(\n 'Moveablelist, startlist and endlist must all be same length')\n\n moveables = []\n for m, start, final in zip(self.motors, self.starts, self.finals):\n moveables.append(MoveableDesc(moveable=m, min_value=min(\n start, final), max_value=max(start, final)))\n moveables[0].is_reference = True\n\n env = opts.get('env', {})\n constrains = [getCallable(cns) for cns in opts.get(\n 'constrains', [UNCONSTRAINED])]\n extrainfodesc = opts.get('extrainfodesc', [])\n\n # Hooks are not always set at this point. 
We will call getHooks\n # later on in the scan_loop\n # self.pre_scan_hooks = self.getHooks('pre-scan')\n # self.post_scan_hooks = self.getHooks('post-scan'\n\n if mode == StepMode:\n self.nr_interv = scan_length\n self.nb_points = self.nr_interv + 1\n self.interv_sizes = (self.finals - self.starts) / self.nr_interv\n self.name = opts.get('name', 'a%iscan' % self.N)\n self._gScan = SScan(self, self._stepGenerator,\n moveables, env, constrains, extrainfodesc)\n elif mode in [ContinuousMode, ContinuousHwTimeMode]:\n # TODO: probably not 100% correct,\n # the idea is to allow passing a list of waypoints\n if isinstance(endlist[0], list):\n self.waypoints = self.finals\n else:\n self.waypoints = [self.finals]\n self.nr_waypoints = len(self.waypoints)\n if mode == ContinuousMode:\n self.slow_down = scan_length\n # aNscans will only have two waypoints (the start and the final\n # positions)\n self.nr_waypoints = 2\n self.way_lengths = (\n self.finals - self.starts) / (self.nr_waypoints - 1)\n self.name = opts.get('name', 'a%iscanc' % self.N)\n self._gScan = CSScan(self, self._waypoint_generator,\n self._period_generator, moveables, env,\n constrains, extrainfodesc)\n elif mode == ContinuousHwTimeMode:\n self.nr_interv = scan_length\n self.nb_points = self.nr_interv + 1\n mg_name = self.getEnv('ActiveMntGrp')\n mg = self.getMeasurementGroup(mg_name)\n mg_latency_time = mg.getLatencyTime()\n if mg_latency_time > latency_time:\n self.info(\"Choosing measurement group latency time: %f\" %\n mg_latency_time)\n latency_time = mg_latency_time\n self.latency_time = latency_time\n self.name = opts.get('name', 'a%iscanct' % self.N)\n self._gScan = CTScan(self, self._waypoint_generator_hwtime,\n moveables,\n env,\n constrains,\n extrainfodesc)\n elif mode == HybridMode:\n self.nr_interv = scan_length\n self.nb_points = self.nr_interv + 1\n self.interv_sizes = (self.finals - self.starts) / self.nr_interv\n self.name = opts.get('name', 'a%iscanh' % self.N)\n self._gScan = HScan(self, self._stepGenerator,\n moveables, env, constrains, extrainfodesc)\n else:\n raise ValueError('invalid value for mode %s' % mode)\n # _data is the default member where the Macro class stores the data.\n # Assign the date produced by GScan (or its subclasses) to it so all\n # the Macro infrastructure related to the data works e.g. getter,\n # property, etc. Ideally this should be done by the data setter\n # but this is available in the Macro class and we inherit from it\n # latter. 
More details in sardana-org/sardana#683.\n self._data = self._gScan.data\n\n def _stepGenerator(self):\n step = {}\n step[\"integ_time\"] = self.integ_time\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = self.getHooks('post-acq') + self.getHooks(\n '_NOHINTS_')\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n\n step[\"check_func\"] = []\n for point_no in range(self.nb_points):\n step[\"positions\"] = self.starts + point_no * self.interv_sizes\n step[\"point_id\"] = point_no\n yield step\n\n def _waypoint_generator(self):\n step = {}\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"check_func\"] = []\n step[\"slow_down\"] = self.slow_down\n for point_no in range(self.nr_waypoints):\n step[\"positions\"] = self.starts + point_no * self.way_lengths\n step[\"waypoint_id\"] = point_no\n yield step\n\n def _waypoint_generator_hwtime(self):\n\n # CScan in its constructor populates a list of data structures - trees.\n # Each tree represent one Moveables with its hierarchy of inferior\n # moveables.\n moveables_trees = self._gScan.get_moveables_trees()\n step = {}\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n post_move_hooks = self.getHooks(\n 'post-move') + [self._fill_missing_records]\n step[\"post-move-hooks\"] = post_move_hooks\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = self.getHooks('post-acq') + self.getHooks(\n '_NOHINTS_')\n step[\"check_func\"] = []\n step[\"active_time\"] = self.nb_points * (self.integ_time\n + self.latency_time)\n step[\"positions\"] = []\n step[\"start_positions\"] = []\n starts = self.starts\n for point_no, waypoint in enumerate(self.waypoints):\n for start, end, moveable_tree in zip(starts, waypoint,\n moveables_trees):\n moveable_root = moveable_tree.root()\n start_positions, end_positions = _calculate_positions(\n moveable_root, start, end)\n step[\"start_positions\"] += start_positions\n step[\"positions\"] += end_positions\n step[\"waypoint_id\"] = point_no\n starts = waypoint\n yield step\n\n def _period_generator(self):\n step = {}\n step[\"integ_time\"] = self.integ_time\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = (self.getHooks('post-acq') +\n self.getHooks('_NOHINTS_'))\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n step[\"check_func\"] = []\n step['extrainfo'] = {}\n point_no = 0\n while(True):\n point_no += 1\n step[\"point_id\"] = point_no\n yield step\n\n def run(self, *args):\n for step in self._gScan.step_scan():\n yield step\n\n def getTimeEstimation(self):\n gScan = self._gScan\n mode = self.mode\n it = gScan.generator()\n v_motors = gScan.get_virtual_motors()\n curr_pos = gScan.motion.readPosition()\n total_time = 0.0\n if mode == StepMode:\n # calculate motion time\n max_step0_time, max_step_time = 0.0, 0.0\n # first motion takes longer, all others should be \"equal\"\n step0 = next(it)\n for v_motor, start, stop, length in zip(v_motors, curr_pos,\n step0['positions'],\n self.interv_sizes):\n path0 = MotionPath(v_motor, start, stop)\n path = MotionPath(v_motor, 0, length)\n max_step0_time = max(max_step0_time, path0.duration)\n max_step_time = max(max_step_time, path.duration)\n motion_time = max_step0_time + self.nr_interv * max_step_time\n # calculate acquisition time\n acq_time = self.nb_points * self.integ_time\n total_time 
= motion_time + acq_time\n\n elif mode == ContinuousMode:\n total_time = gScan.waypoint_estimation()\n # TODO: add time estimation for ContinuousHwTimeMode\n return total_time\n\n def getIntervalEstimation(self):\n mode = self.mode\n if mode in [StepMode, ContinuousHwTimeMode, HybridMode]:\n return self.nr_interv\n elif mode == ContinuousMode:\n return self.nr_waypoints\n\n def _fill_missing_records(self):\n # fill record list with dummy records for the final padding\n nb_of_points = self.nb_points\n scan = self._gScan\n nb_of_records = len(scan.data.records)\n missing_records = nb_of_points - nb_of_records\n scan.data.initRecords(missing_records)\n\n def _get_nr_points(self):\n msg = (\"nr_points is deprecated since version 3.0.3. \"\n \"Use nb_points instead.\")\n self.warning(msg)\n return self.nb_points\n\n nr_points = property(_get_nr_points)\n\nclass dNscan(aNscan):\n \"\"\"\n same as aNscan but it interprets the positions as being relative to the\n current positions and upon completion, it returns the motors to their\n original positions\n \"\"\"\n\n hints = copy.deepcopy(aNscan.hints)\n hints['scan'] = 'dNscan'\n\n def _prepare(self, motorlist, startlist, endlist, scan_length,\n integ_time, mode=StepMode, **opts):\n self._motion = self.getMotion([m.getName() for m in motorlist])\n self.originalPositions = numpy.array(\n self._motion.readPosition(force=True))\n starts = numpy.array(startlist, dtype='d') + self.originalPositions\n finals = numpy.array(endlist, dtype='d') + self.originalPositions\n aNscan._prepare(self, motorlist, starts, finals,\n scan_length, integ_time, mode=mode, **opts)\n\n def do_restore(self):\n self.info(\"Returning to start positions...\")\n self._motion.move(self.originalPositions)\n\n\nclass ascan(aNscan, Macro):\n \"\"\"\n Do an absolute scan of the specified motor.\n ascan scans one motor, as specified by motor. The motor starts at the\n position given by start_pos and ends at the position given by final_pos.\n The step size is (start_pos-final_pos)/nr_interv. The number of data\n points collected will be nr_interv+1. Count time is given by time which\n if positive, specifies seconds and if negative, specifies monitor counts.\n \"\"\"\n\n param_def = [\n ['motor', Type.Moveable, None, 'Moveable to move'],\n ['start_pos', Type.Float, None, 'Scan start position'],\n ['final_pos', Type.Float, None, 'Scan final position'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, motor, start_pos, final_pos, nr_interv, integ_time,\n **opts):\n self._prepare([motor], [start_pos], [final_pos],\n nr_interv, integ_time, **opts)\n\n\nclass a2scan(aNscan, Macro):\n \"\"\"\n two-motor scan.\n a2scan scans two motors, as specified by motor1 and motor2.\n Each motor moves the same number of intervals with starting and ending\n positions given by start_pos1 and final_pos1, start_pos2 and final_pos2,\n respectively. 
The step size for each motor is:\n    (start_pos-final_pos)/nr_interv\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\n    \"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, motor1, start_pos1, final_pos1, motor2, start_pos2,\n                final_pos2, nr_interv, integ_time, **opts):\n        self._prepare([motor1, motor2], [start_pos1, start_pos2], [\n                      final_pos1, final_pos2], nr_interv, integ_time, **opts)\n\n\nclass a3scan(aNscan, Macro):\n    \"\"\"three-motor scan.\n    a3scan scans three motors, as specified by motor1, motor2 and motor3.\n    Each motor moves the same number of intervals with starting and ending\n    positions given by start_pos1 and final_pos1, start_pos2 and final_pos2,\n    start_pos3 and final_pos3, respectively.\n    The step size for each motor is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, nr_interv,\n                integ_time, **opts):\n        self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3],\n                      nr_interv, integ_time, **opts)\n\n\nclass a4scan(aNscan, Macro):\n    \"\"\"four-motor scan.\n    a4scan scans four motors, as specified by motor1, motor2, motor3 and\n    motor4.\n    Each motor moves the same number of intervals with starting and ending\n    positions given by start_posN and final_posN (for N=1,2,3,4).\n    The step size for each motor is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        
['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                nr_interv, integ_time, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [\n                      f1, f2, f3, f4], nr_interv, integ_time, **opts)\n\n\nclass amultiscan(aNscan, Macro):\n    \"\"\"\n    Multiple motor scan.\n    amultiscan scans N motors, as specified by motor1, motor2,...,motorN.\n    Each motor moves the same number of intervals with starting and ending\n    positions given by start_posN and final_posN (for N=1,2,...).\n    The step size for each motor is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\n    \"\"\"\n\n    param_def = [\n        ['motor_start_end_list',\n         [['motor', Type.Moveable, None, 'Moveable to move'],\n          ['start', Type.Float, None, 'Starting position'],\n          ['end', Type.Float, None, 'Final position']],\n         None, 'List of motor, start and end positions'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, *args, **opts):\n        motors = args[0:-2:3]\n        starts = args[1:-2:3]\n        ends = args[2:-2:3]\n        nr_interv = args[-2]\n        integ_time = args[-1]\n\n        self._prepare(motors, starts, ends, nr_interv, integ_time, **opts)\n\n\nclass dmultiscan(dNscan, Macro):\n    \"\"\"\n    Multiple motor scan relative to the starting positions.\n    dmultiscan scans N motors, as specified by motor1, motor2,...,motorN.\n    Each motor moves the same number of intervals. If each motor is at a\n    position X before the scan begins, it will be scanned from X+start_posN\n    to X+final_posN (where N is one of 1,2,...).\n    The step size for each motor is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\n    \"\"\"\n\n    param_def = [\n        ['motor_start_end_list',\n         [['motor', Type.Moveable, None, 'Moveable to move'],\n          ['start', Type.Float, None, 'Starting position'],\n          ['end', Type.Float, None, 'Final position']],\n         None, 'List of motor, start and end positions'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, *args, **opts):\n        motors = args[0:-2:3]\n        starts = args[1:-2:3]\n        ends = args[2:-2:3]\n        nr_interv = args[-2]\n        integ_time = args[-1]\n\n        self._prepare(motors, starts, ends, nr_interv, integ_time, **opts)\n\n\nclass dscan(dNscan, Macro):\n    \"\"\"motor scan relative to the starting position.\n    dscan scans one motor, as specified by motor. If the motor is at a\n    position X before the scan begins, it will be scanned from X+start_pos\n    to X+final_pos. The step size is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1. Count time is\n    given by time which if positive, specifies seconds and if negative,\n    specifies monitor counts. 
\"\"\"\n\n param_def = [\n ['motor', Type.Moveable, None, 'Moveable to move'],\n ['start_pos', Type.Float, None, 'Scan start position'],\n ['final_pos', Type.Float, None, 'Scan final position'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, motor, start_pos, final_pos, nr_interv, integ_time,\n **opts):\n self._prepare([motor], [start_pos], [final_pos],\n nr_interv, integ_time, **opts)\n\n\nclass d2scan(dNscan, Macro):\n \"\"\"two-motor scan relative to the starting position.\n d2scan scans two motors, as specified by motor1 and motor2.\n Each motor moves the same number of intervals. If each motor is at a\n position X before the scan begins, it will be scanned from X+start_posN\n to X+final_posN (where N is one of 1,2).\n The step size for each motor is (start_pos-final_pos)/nr_interv.\n The number of data points collected will be nr_interv+1.\n Count time is given by time which if positive, specifies seconds and\n if negative, specifies monitor counts.\"\"\"\n param_def = [\n ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n ['start_pos1', Type.Float, None, 'Scan start position 1'],\n ['final_pos1', Type.Float, None, 'Scan final position 1'],\n ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n ['start_pos2', Type.Float, None, 'Scan start position 2'],\n ['final_pos2', Type.Float, None, 'Scan final position 2'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, motor1, start_pos1, final_pos1, motor2, start_pos2,\n final_pos2, nr_interv, integ_time, **opts):\n self._prepare([motor1, motor2], [start_pos1, start_pos2], [\n final_pos1, final_pos2], nr_interv, integ_time, **opts)\n\n\nclass d3scan(dNscan, Macro):\n \"\"\"three-motor scan .\n d3scan scans three motors, as specified by motor1, motor2 and motor3.\n Each motor moves the same number of intervals. If each motor is at a\n position X before the scan begins, it will be scanned from X+start_posN\n to X+final_posN (where N is one of 1,2,3)\n The step size for each motor is (start_pos-final_pos)/nr_interv.\n The number of data points collected will be nr_interv+1.\n Count time is given by time which if positive, specifies seconds and\n if negative, specifies monitor counts.\"\"\"\n\n param_def = [\n ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n ['start_pos1', Type.Float, None, 'Scan start position 1'],\n ['final_pos1', Type.Float, None, 'Scan final position 1'],\n ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n ['start_pos2', Type.Float, None, 'Scan start position 2'],\n ['final_pos2', Type.Float, None, 'Scan final position 2'],\n ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n ['start_pos3', Type.Float, None, 'Scan start position 3'],\n ['final_pos3', Type.Float, None, 'Scan final position 3'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, nr_interv,\n integ_time, **opts):\n self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3],\n nr_interv, integ_time, **opts)\n\n\nclass d4scan(dNscan, Macro):\n \"\"\"four-motor scan relative to the starting positions\n a4scan scans four motors, as specified by motor1, motor2, motor3 and\n motor4.\n Each motor moves the same number of intervals. 
If each motor is at a\n    position X before the scan begins, it will be scanned from X+start_posN\n    to X+final_posN (where N is one of 1,2,3,4).\n    The step size for each motor is (start_pos-final_pos)/nr_interv.\n    The number of data points collected will be nr_interv+1.\n    Count time is given by time which if positive, specifies seconds and\n    if negative, specifies monitor counts.\n    Upon termination, the motors are returned to their starting positions.\n    \"\"\"\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        ['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time']\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                nr_interv, integ_time, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [\n                      f1, f2, f3, f4], nr_interv, integ_time, **opts)\n\n\nclass mesh(Macro, Hookable):\n    \"\"\"2d grid scan.\n    The mesh scan traces out a grid using motor1 and motor2.\n    The first motor scans from m1_start_pos to m1_final_pos using the specified\n    number of intervals. The second motor similarly scans from m2_start_pos\n    to m2_final_pos. Each point is counted for integ_time seconds\n    (or monitor counts, if integ_time is negative).\n    The scan of motor1 is done at each point scanned by motor2. 
That is, the\n first motor scan is nested within the second motor scan.\n \"\"\"\n\n hints = {'scan': 'mesh', 'allowsHooks': ('pre-scan', 'pre-move',\n 'post-move', 'pre-acq',\n 'post-acq', 'post-step',\n 'post-scan')}\n env = ('ActiveMntGrp',)\n\n param_def = [\n ['motor1', Type.Moveable, None, 'First motor to move'],\n ['m1_start_pos', Type.Float, None, 'Scan start position for first '\n 'motor'],\n ['m1_final_pos', Type.Float, None, 'Scan final position for first '\n 'motor'],\n ['m1_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['motor2', Type.Moveable, None, 'Second motor to move'],\n ['m2_start_pos', Type.Float, None, 'Scan start position for second '\n 'motor'],\n ['m2_final_pos', Type.Float, None, 'Scan final position for second '\n 'motor'],\n ['m2_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['bidirectional', Type.Boolean, False, 'Save time by scanning '\n 's-shaped']\n ]\n\n def prepare(self, m1, m1_start_pos, m1_final_pos, m1_nr_interv,\n m2, m2_start_pos, m2_final_pos, m2_nr_interv, integ_time,\n bidirectional, **opts):\n self.motors = [m1, m2]\n self.starts = numpy.array([m1_start_pos, m2_start_pos], dtype='d')\n self.finals = numpy.array([m1_final_pos, m2_final_pos], dtype='d')\n self.nr_intervs = numpy.array([m1_nr_interv, m2_nr_interv], dtype='i')\n self.nb_points = (m1_nr_interv + 1) * (m2_nr_interv + 1)\n self.integ_time = integ_time\n self.bidirectional_mode = bidirectional\n\n self.name = opts.get('name', 'mesh')\n\n generator = self._generator\n moveables = []\n for m, start, final in zip(self.motors, self.starts, self.finals):\n moveables.append(MoveableDesc(moveable=m,\n min_value=min(start, final),\n max_value=max(start, final)))\n moveables[0].is_reference = True\n env = opts.get('env', {})\n constrains = [getCallable(cns) for cns in opts.get(\n 'constrains', [UNCONSTRAINED])]\n\n # Hooks are not always set at this point. We will call getHooks\n # later on in the scan_loop\n # self.pre_scan_hooks = self.getHooks('pre-scan')\n # self.post_scan_hooks = self.getHooks('post-scan')\n\n self._gScan = SScan(self, generator, moveables, env, constrains)\n\n # _data is the default member where the Macro class stores the data.\n # Assign the date produced by GScan (or its subclasses) to it so all\n # the Macro infrastructure related to the data works e.g. getter,\n # property, etc.\n self.setData(self._gScan.data)\n\n def _generator(self):\n step = {}\n step[\"integ_time\"] = self.integ_time\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = (self.getHooks('post-acq') +\n self.getHooks('_NOHINTS_'))\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n step[\"check_func\"] = []\n m1start, m2start = self.starts\n m1end, m2end = self.finals\n points1, points2 = self.nr_intervs + 1\n point_no = 1\n m1_space = numpy.linspace(m1start, m1end, points1)\n m1_space_inv = numpy.linspace(m1end, m1start, points1)\n\n for i, m2pos in enumerate(numpy.linspace(m2start, m2end, points2)):\n space = m1_space\n if i % 2 != 0 and self.bidirectional_mode:\n space = m1_space_inv\n for m1pos in space:\n step[\"positions\"] = numpy.array([m1pos, m2pos])\n # TODO: maybe another ID would be better? (e.g. 
\"(A,B)\")\n step[\"point_id\"] = point_no\n point_no += 1\n yield step\n\n def run(self, *args):\n for step in self._gScan.step_scan():\n yield step\n\n\nclass dmesh(mesh):\n \"\"\"\n 2d relative grid scan.\n The relative mesh scan traces out a grid using motor1 and motor2.\n If first motor is at the position X before the scan begins, it will\n be scanned from X+m1_start_pos to X+m1_final_pos using the specified\n m1_nr_interv number of intervals. If the second motor is\n at the position Y before the scan begins, it will be scanned\n from Y+m2_start_pos to Y+m2_final_pos using the specified m2_nr_interv\n number of intervals.\n Each point is counted for the integ_time seconds (or monitor counts,\n if integ_time is negative).\n The scan of motor1 is done at each point scanned by motor2. That is, the\n first motor scan is nested within the second motor scan.\n Upon scan completion, it returns the motors to their original positions.\n \"\"\"\n\n hints = copy.deepcopy(mesh.hints)\n hints['scan'] = 'dmesh'\n\n env = copy.deepcopy(mesh.env)\n\n param_def = [\n ['motor1', Type.Moveable, None, 'First motor to move'],\n ['m1_start_pos', Type.Float, None, 'Scan start position for first '\n 'motor'],\n ['m1_final_pos', Type.Float, None, 'Scan final position for first '\n 'motor'],\n ['m1_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['motor2', Type.Moveable, None, 'Second motor to move'],\n ['m2_start_pos', Type.Float, None, 'Scan start position for second '\n 'motor'],\n ['m2_final_pos', Type.Float, None, 'Scan final position for second '\n 'motor'],\n ['m2_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['bidirectional', Type.Boolean, False, 'Save time by scanning '\n 's-shaped']\n ]\n\n def prepare(self, m1, m1_start_pos, m1_final_pos, m1_nr_interv,\n m2, m2_start_pos, m2_final_pos, m2_nr_interv, integ_time,\n bidirectional, **opts):\n self._motion = self.getMotion([m1, m2])\n self.originalPositions = numpy.array(\n self._motion.readPosition(force=True))\n start1 = self.originalPositions[0] + m1_start_pos\n start2 = self.originalPositions[1] + m2_start_pos\n final1 = self.originalPositions[0] + m1_final_pos\n final2 = self.originalPositions[1] + m2_final_pos\n mesh.prepare(self, m1, start1, final1, m1_nr_interv,\n m2, start2, final2, m2_nr_interv, integ_time,\n bidirectional, **opts)\n\n def do_restore(self):\n self.info(\"Returning to start positions...\")\n self._motion.move(self.originalPositions)\n\n\nclass fscan(Macro, Hookable):\n \"\"\"\n N-dimensional scan along user defined paths.\n The motion path for each motor is defined through the evaluation of a\n user-supplied function that is evaluated as a function of the independent\n variables.\n -independent variables are supplied through the indepvar string.\n The syntax for indepvar is \"x=expresion1,y=expresion2,...\"\n -If no indep vars need to be defined, write \"!\" or \"*\" or \"None\"\n -motion path for motor is generated by evaluating the corresponding\n function 'func'\n -Count time is given by integ_time. If integ_time is a scalar, then\n the same integ_time is used for all points. 
If it evaluates as an array\n (with same length as the paths), fscan will assign a different integration\n time to each acquisition point.\n -If integ_time is positive, it specifies seconds and if negative, specifies\n monitor counts.\n\n IMPORTANT Notes:\n -no spaces are allowed in the indepvar string.\n -all funcs must evaluate to the same number of points\n\n\n >>> fscan \"x=[1,3,5,7,9],y=arange(5)\" 0.1 motor1 x**2 motor2 sqrt(y*x+3)\n >>> fscan \"x=[1,3,5,7,9],y=arange(5)\" \"[0.1,0.2,0.3,0.4,0.5]\" motor1 x**2 \\\nmotor2 sqrt(y*x+3)\n \"\"\"\n\n # ['integ_time', Type.String, None, 'Integration time']\n hints = {'scan': 'fscan',\n 'allowsHooks': ('pre-scan', 'pre-move', 'post-move', 'pre-acq',\n 'post-acq', 'post-step', 'post-scan')}\n env = ('ActiveMntGrp',)\n\n param_def = [\n ['indepvars', Type.String, None, 'Independent Variables'],\n ['integ_time', Type.String, None, 'Integration time'],\n ['motor_funcs',\n [['motor', Type.Moveable, None, 'motor'],\n ['func', Type.String, None, 'curve defining path']],\n None, 'List of motor and path curves']\n ]\n\n def prepare(self, *args, **opts):\n if args[0].lower() in [\"!\", \"*\", \"none\", None]:\n indepvars = {}\n else:\n indepvars = SafeEvaluator({'dict': dict}).eval(\n 'dict(%s)' % args[0]) # create a dict containing the indepvars\n\n self.motors = [item[0] for item in args[2]]\n self.funcstrings = [item[1] for item in args[2]]\n\n globals_lst = [dict(list(zip(indepvars, values)))\n for values in zip(*list(indepvars.values()))]\n self.paths = [[SafeEvaluator(globals).eval(\n func) for globals in globals_lst] for func in self.funcstrings]\n\n self._integ_time = numpy.array(eval(args[1]), dtype='d')\n\n self.opts = opts\n if len(self.motors) == len(self.paths) > 0:\n self.N = len(self.motors)\n else:\n raise ValueError(\n 'Moveable and func lists must be non-empty and same length')\n npoints = len(self.paths[0])\n try:\n # if everything is OK, the following lines should return a 2D array\n # n which each motor path is a row.\n # Typical failure is due to shape mismatch due to inconsistent\n # input\n self.paths = numpy.array(self.paths, dtype='d')\n self.paths.reshape((self.N, npoints))\n except Exception: # shape mismatch?\n # try to give a meaningful description of the error\n for p, fs in zip(self.paths, self.funcstrings):\n if len(p) != npoints:\n raise ValueError('\"%s\" and \"%s\" yield different number '\n 'of points (%i vs %i)' %\n (self.funcstrings[0], fs, npoints,\n len(p)))\n raise # the problem wasn't a shape mismatch\n self._nb_points = npoints\n\n if self._integ_time.size == 1:\n self._integ_time = self._integ_time * \\\n numpy.ones(self._nb_points) # extend integ_time\n elif self._integ_time.size != self._nb_points:\n raise ValueError('time_integ must either be a scalar or '\n 'length=npoints (%i)' % self._nb_points)\n\n self.name = opts.get('name', 'fscan')\n\n generator = self._generator\n moveables = self.motors\n env = opts.get('env', {})\n constrains = [getCallable(cns) for cns in opts.get(\n 'constrains', [UNCONSTRAINED])]\n\n # Hooks are not always set at this point. We will call getHooks\n # later on in the scan_loop\n # self.pre_scan_hooks = self.getHooks('pre-scan')\n # self.post_scan_hooks = self.getHooks('post-scan'\n\n self._gScan = SScan(self, generator, moveables, env, constrains)\n\n # _data is the default member where the Macro class stores the data.\n # Assign the date produced by GScan (or its subclasses) to it so all\n # the Macro infrastructure related to the data works e.g. 
getter,\n # property, etc.\n self.setData(self._gScan.data)\n\n def _generator(self):\n step = {}\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = (self.getHooks('post-acq') +\n self.getHooks('_NOHINTS_'))\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n\n step[\"check_func\"] = []\n for i in range(self._nb_points):\n step[\"positions\"] = self.paths[:, i]\n step[\"integ_time\"] = self._integ_time[i]\n step[\"point_id\"] = i\n yield step\n\n def run(self, *args):\n for step in self._gScan.step_scan():\n yield step\n\n def _get_nr_points(self):\n msg = (\"nr_points is deprecated since version 3.0.3. \"\n \"Use nb_points instead.\")\n self.warning(msg)\n return self.nb_points\n\n nr_points = property(_get_nr_points)\n\n\n\nclass ascanh(aNscan, Macro):\n \"\"\"Do an absolute scan of the specified motor.\n ascan scans one motor, as specified by motor. The motor starts at the\n position given by start_pos and ends at the position given by final_pos.\n The step size is (start_pos-final_pos)/nr_interv. The number of data\n points collected will be nr_interv+1. Count time is given by time which\n if positive, specifies seconds and if negative, specifies monitor\n counts. \"\"\"\n\n param_def = [\n ['motor', Type.Moveable, None, 'Moveable to move'],\n ['start_pos', Type.Float, None, 'Scan start position'],\n ['final_pos', Type.Float, None, 'Scan final position'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, motor, start_pos, final_pos, nr_interv, integ_time,\n **opts):\n self._prepare([motor], [start_pos], [final_pos], nr_interv, integ_time,\n mode=HybridMode, **opts)\n\n\nclass rscan(Macro, Hookable):\n \"\"\"rscan.\n Do an absolute scan of the specified motor with different number of intervals for each region.\n It uses the gscan framework.\n \"\"\"\n\n hints = {'scan': 'rscan', 'allowsHooks': ('pre-scan', 'pre-move',\n 'post-move', 'pre-acq',\n 'post-acq', 'post-step',\n 'post-scan')}\n # env = ('ActiveMntGrp',)\n\n param_def = [\n ['motor', Type.Moveable, None, 'Motor to move'],\n ['start_pos', Type.Float, None, 'Start position'],\n ['regions',\n [['next_pos', Type.Float, None, 'next position'],\n ['region_nr_intervals', Type.Integer, None,\n 'Region number of intervals']],\n None, 'List of tuples: (next_pos, region_nr_intervals'],\n ['integ_time', Type.Float, None, 'Integration time']\n ]\n\n def prepare(self, motor, start_pos, regions, integ_time, **opts):\n self.name = 'rscan'\n self.integ_time = integ_time\n self.start_pos = start_pos\n self.regions = regions\n self.regions_count = len(self.regions) // 2\n\n generator = self._generator\n self.motors = [motor]\n env = opts.get('env', {})\n constrains = []\n self._gScan = SScan(self, generator, self.motors, env, constrains)\n self._data = self._gScan.data\n\n def _generator(self):\n step = {}\n step[\"integ_time\"] = self.integ_time\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = self.getHooks('post-acq') + self.getHooks(\n '_NOHINTS_')\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n\n point_id = 0\n region_start = self.start_pos\n for r in range(len(self.regions)):\n region_stop, region_nr_intervals = self.regions[\n r][0], 
self.regions[r][1]\n            positions = numpy.linspace(\n                region_start, region_stop, region_nr_intervals + 1)\n            if point_id != 0:\n                # positions must be calculated from the start to the end of the region\n                # but after the first region, the 'start' point must not be\n                # repeated\n                positions = positions[1:]\n            for p in positions:\n                step['positions'] = [p]\n                step['point_id'] = point_id\n                point_id += 1\n                yield step\n            region_start = region_stop\n\n    def run(self, *args):\n        for step in self._gScan.step_scan():\n            yield step\n\n\nclass r2scan(Macro, Hookable):\n    \"\"\"r2scan.\n    Do an absolute scan of the specified motors with a different number of intervals for each region.\n    It uses the gscan framework. All the motors will be driven to the same position in each step.\n    \"\"\"\n\n    hints = {'scan': 'r2scan', 'allowsHooks': ('pre-scan', 'pre-move',\n                                               'post-move', 'pre-acq',\n                                               'post-acq', 'post-step',\n                                               'post-scan')}\n    # env = ('ActiveMntGrp',)\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Motor to move'],\n        ['motor2', Type.Moveable, None, 'Motor to move'],\n        ['start_pos', Type.Float, None, 'Start position'],\n        ['regions',\n         [['next_pos', Type.Float, None, 'next position'],\n          ['region_nr_intervals', Type.Integer, None,\n           'Region number of intervals']],\n         None, 'List of tuples: (next_pos, region_nr_intervals)'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n    ]\n\n    def prepare(self, motor1, motor2, start_pos, regions, integ_time, **opts):\n        self.name = 'r2scan'\n        self.integ_time = integ_time\n        self.start_pos = start_pos\n        self.regions = regions\n        self.regions_count = len(self.regions) // 2\n\n        generator = self._generator\n        self.motors = [motor1, motor2]\n        env = opts.get('env', {})\n        constrains = []\n        self._gScan = SScan(self, generator, self.motors, env, constrains)\n        self._data = self._gScan.data\n\n    def _generator(self):\n        step = {}\n        step[\"integ_time\"] = self.integ_time\n        step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n        step[\"post-move-hooks\"] = self.getHooks('post-move')\n        step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n        step[\"post-acq-hooks\"] = self.getHooks('post-acq') + self.getHooks(\n            '_NOHINTS_')\n        step[\"post-step-hooks\"] = self.getHooks('post-step')\n\n        point_id = 0\n        region_start = self.start_pos\n        for r in range(len(self.regions)):\n            region_stop, region_nr_intervals = self.regions[\n                r][0], self.regions[r][1]\n            positions = numpy.linspace(\n                region_start, region_stop, region_nr_intervals + 1)\n            if point_id != 0:\n                # positions must be calculated from the start to the end of the region\n                # but after the first region, the 'start' point must not be\n                # repeated\n                positions = positions[1:]\n            for p in positions:\n                step['positions'] = [p, p]\n                step['point_id'] = point_id\n                point_id += 1\n                yield step\n            region_start = region_stop\n\n    def run(self, *args):\n        for step in self._gScan.step_scan():\n            yield step\n\n\nclass r3scan(Macro, Hookable):\n    \"\"\"r3scan.\n    Do an absolute scan of the specified motors with a different number of\n    intervals for each region. 
It uses the gscan framework.\n    All the motors will be driven to the same position in each step.\n\n    \"\"\"\n\n    hints = {'scan': 'r3scan', 'allowsHooks': ('pre-scan', 'pre-move',\n                                               'post-move', 'pre-acq',\n                                               'post-acq', 'post-step',\n                                               'post-scan')}\n    # env = ('ActiveMntGrp',)\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Motor to move'],\n        ['motor2', Type.Moveable, None, 'Motor to move'],\n        ['motor3', Type.Moveable, None, 'Motor to move'],\n        ['start_pos', Type.Float, None, 'Start position'],\n        ['regions',\n         [['next_pos', Type.Float, None, 'next position'],\n          ['region_nr_intervals', Type.Integer, None,\n           'Region number of intervals']],\n         None, 'List of tuples: (next_pos, region_nr_intervals)'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n    ]\n\n    def prepare(self, motor1, motor2, motor3, start_pos, regions, integ_time, **opts):\n        self.name = 'r3scan'\n        self.integ_time = integ_time\n        self.start_pos = start_pos\n        self.regions = regions\n        self.regions_count = len(self.regions) // 2\n\n        generator = self._generator\n        self.motors = [motor1, motor2, motor3]\n        env = opts.get('env', {})\n        constrains = []\n        self._gScan = SScan(self, generator, self.motors, env, constrains)\n        self._data = self._gScan.data\n\n    def _generator(self):\n        step = {}\n        step[\"integ_time\"] = self.integ_time\n        step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n        step[\"post-move-hooks\"] = self.getHooks('post-move')\n        step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n        step[\"post-acq-hooks\"] = self.getHooks('post-acq') + self.getHooks(\n            '_NOHINTS_')\n        step[\"post-step-hooks\"] = self.getHooks('post-step')\n\n        point_id = 0\n        region_start = self.start_pos\n        for r in range(len(self.regions)):\n            region_stop, region_nr_intervals = self.regions[\n                r][0], self.regions[r][1]\n            positions = numpy.linspace(\n                region_start, region_stop, region_nr_intervals + 1)\n            if point_id != 0:\n                # positions must be calculated from the start to the end of the region\n                # but after the first region, the 'start' point must not be\n                # repeated\n                positions = positions[1:]\n            for p in positions:\n                step['positions'] = [p, p, p]\n                step['point_id'] = point_id\n                point_id += 1\n                yield step\n            region_start = region_stop\n\n    def run(self, *args):\n        for step in self._gScan.step_scan():\n            yield step\n\n\nclass scanhist(Macro):\n    \"\"\"Shows scan history information. Give an optional scan number to\n    display details about a specific scan.\"\"\"\n\n    param_def = [\n        ['scan number', Type.Integer, -1,\n         'scan number. 
[default=-1 meaning show all scans]'],\n ]\n\n def run(self, scan_number):\n try:\n hist = self.getEnv(\"ScanHistory\")\n except UnknownEnv:\n print(\"No scan recorded in history\")\n return\n if scan_number < 0:\n self.show_all(hist)\n else:\n self.show_one(hist, scan_number)\n\n def show_one(self, hist, scan_number):\n item = None\n for h in hist:\n if h['serialno'] == scan_number:\n item = h\n break\n if item is None:\n self.warning(\"Could not find scan number %s\", scan_number)\n return\n\n serialno, title = h['serialno'], h['title']\n start = datetime.datetime.fromtimestamp(h['startts'])\n end = datetime.datetime.fromtimestamp(h['endts'])\n total_time = end - start\n start, end, total_time = start.ctime(), end.ctime(), str(total_time)\n scan_dir, scan_file = h['ScanDir'], h['ScanFile']\n deadtime = '%.1f%%' % h['deadtime']\n\n user = h['user']\n store = \"Not stored!\"\n if scan_dir is not None and scan_file is not None:\n if isinstance(scan_file, str):\n store = os.path.join(scan_dir, scan_file)\n else:\n store = scan_dir + os.path.sep + str(scan_file)\n\n channels = \", \".join(h['channels'])\n cols = [\"#\", \"Title\", \"Start time\", \"End time\", \"Took\", \"Dead time\",\n \"User\", \"Stored\", \"Channels\"]\n data = [serialno, title, start, end, total_time, deadtime, user, store,\n channels]\n\n table = Table([data], row_head_str=cols, row_head_fmt='%*s',\n elem_fmt=['%-*s'],\n col_sep=' : ')\n for line in table.genOutput():\n self.output(line)\n\n def show_all(self, hist):\n\n cols = \"#\", \"Title\", \"Start time\", \"End time\", \"Stored\"\n width = -1, -1, -1, -1, -1\n out = List(cols, max_col_width=width)\n today = datetime.datetime.today().date()\n for h in hist:\n start = datetime.datetime.fromtimestamp(h['startts'])\n if start.date() == today:\n start = start.time().strftime(\"%H:%M:%S\")\n else:\n start = start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end = datetime.datetime.fromtimestamp(h['endts'])\n if end.date() == today:\n end = end.time().strftime(\"%H:%M:%S\")\n else:\n end = end.strftime(\"%Y-%m-%d %H:%M:%S\")\n scan_file = h['ScanFile']\n store = \"Not stored!\"\n if scan_file is not None:\n store = \", \".join(scan_file)\n row = h['serialno'], h['title'], start, end, store\n out.appendRow(row)\n for line in out.genOutput():\n self.output(line)\n\n\nclass ascanc(aNscan, Macro):\n \"\"\"Do an absolute continuous scan of the specified motor.\n ascanc scans one motor, as specified by motor.\"\"\"\n\n param_def = [\n ['motor', Type.Moveable, None, 'Moveable to move'],\n ['start_pos', Type.Float, None, 'Scan start position'],\n ['final_pos', Type.Float, None, 'Scan final position'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n ]\n\n def prepare(self, motor, start_pos, final_pos, integ_time, slow_down,\n **opts):\n self._prepare([motor], [start_pos], [final_pos], slow_down,\n integ_time, mode=ContinuousMode, **opts)\n\n\nclass a2scanc(aNscan, Macro):\n \"\"\"two-motor continuous scan\"\"\"\n param_def = [\n ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n ['start_pos1', Type.Float, None, 'Scan start position 1'],\n ['final_pos1', Type.Float, None, 'Scan final position 1'],\n ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n ['start_pos2', Type.Float, None, 'Scan start position 2'],\n ['final_pos2', Type.Float, None, 'Scan final position 2'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n ]\n\n 
def prepare(self, motor1, start_pos1, final_pos1, motor2, start_pos2,\n                final_pos2, integ_time, slow_down, **opts):\n        self._prepare([motor1, motor2], [start_pos1, start_pos2],\n                      [final_pos1, final_pos2], slow_down, integ_time,\n                      mode=ContinuousMode, **opts)\n\n\nclass a3scanc(aNscan, Macro):\n    \"\"\"three-motor continuous scan\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, integ_time,\n                slow_down, **opts):\n        self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3], slow_down,\n                      integ_time, mode=ContinuousMode, **opts)\n\n\nclass a4scanc(aNscan, Macro):\n    \"\"\"four-motor continuous scan\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        ['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                integ_time, slow_down, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [f1, f2, f3, f4],\n                      slow_down, integ_time, mode=ContinuousMode, **opts)\n\n\nclass dNscanc(dNscan):\n\n    def do_restore(self):\n        # set velocities to maximum and then move to initial positions\n        for moveable in self.motors:\n            self._gScan.set_max_top_velocity(moveable)\n        dNscan.do_restore(self)\n\n\nclass dscanc(dNscanc, Macro):\n    \"\"\"continuous motor scan relative to the starting position.\"\"\"\n\n    param_def = [\n        ['motor', Type.Moveable, None, 'Moveable to move'],\n        ['start_pos', Type.Float, None, 'Scan start position'],\n        ['final_pos', Type.Float, None, 'Scan final position'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, motor, start_pos, final_pos, integ_time, slow_down,\n                **opts):\n        self._prepare([motor], [start_pos], [final_pos], slow_down, integ_time,\n                      mode=ContinuousMode, **opts)\n\n\nclass d2scanc(dNscanc, Macro):\n    \"\"\"continuous two-motor scan relative to the starting positions\"\"\"\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', 
Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, motor1, start_pos1, final_pos1, motor2, start_pos2,\n                final_pos2, integ_time, slow_down, **opts):\n        self._prepare([motor1, motor2], [start_pos1, start_pos2],\n                      [final_pos1, final_pos2], slow_down, integ_time,\n                      mode=ContinuousMode, **opts)\n\n\nclass d3scanc(dNscanc, Macro):\n    \"\"\"continuous three-motor scan relative to the starting positions\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, integ_time,\n                slow_down, **opts):\n        self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3], slow_down,\n                      integ_time, mode=ContinuousMode, **opts)\n\n\nclass d4scanc(dNscanc, Macro):\n    \"\"\"continuous four-motor scan relative to the starting positions\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        ['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                integ_time, slow_down, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [f1, f2, f3, f4],\n                      slow_down, integ_time, mode=ContinuousMode, **opts)\n\n\nclass meshc(Macro, Hookable):\n    \"\"\"2d grid scan with 
continuous movement of the first motor.\"\"\"\n\n    hints = {'scan': 'mesh', 'allowsHooks': ('pre-scan', 'pre-move',\n                                             'post-move', 'pre-acq',\n                                             'post-acq', 'post-step',\n                                             'post-scan')}\n    env = ('ActiveMntGrp',)\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'First motor to move'],\n        ['m1_start_pos', Type.Float, None, 'Scan start position for first '\n                                           'motor'],\n        ['m1_final_pos', Type.Float, None, 'Scan final position for first '\n                                           'motor'],\n        ['slow_down', Type.Float, None, 'global scan slow down factor (0, 1]'],\n        ['motor2', Type.Moveable, None, 'Second motor to move'],\n        ['m2_start_pos', Type.Float, None, 'Scan start position for second '\n                                           'motor'],\n        ['m2_final_pos', Type.Float, None, 'Scan final position for second '\n                                           'motor'],\n        ['m2_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['bidirectional', Type.Boolean, False, 'Save time by scanning '\n                                               's-shaped']\n    ]\n\n    def prepare(self, m1, m1_start_pos, m1_final_pos, slow_down,\n                m2, m2_start_pos, m2_final_pos, m2_nr_interv, integ_time,\n                bidirectional, **opts):\n        self.motors = [m1, m2]\n        self.slow_down = slow_down\n        self.starts = numpy.array([m1_start_pos, m2_start_pos], dtype='d')\n        self.finals = numpy.array([m1_final_pos, m2_final_pos], dtype='d')\n        self.m2_nr_interv = m2_nr_interv\n        self.integ_time = integ_time\n        self.bidirectional_mode = bidirectional\n        self.nr_waypoints = m2_nr_interv + 1\n\n        self.name = opts.get('name', 'meshc')\n\n        moveables = []\n        for m, start, final in zip(self.motors, self.starts, self.finals):\n            moveables.append(MoveableDesc(moveable=m, min_value=min(\n                start, final), max_value=max(start, final)))\n        moveables[0].is_reference = True\n\n        env = opts.get('env', {})\n        constrains = [getCallable(cns) for cns in opts.get(\n            'constrains', [UNCONSTRAINED])]\n        extrainfodesc = opts.get('extrainfodesc', [])\n\n        # Hooks are not always set at this point. We will call getHooks\n        # later on in the scan_loop\n        # self.pre_scan_hooks = self.getHooks('pre-scan')\n        # self.post_scan_hooks = self.getHooks('post-scan')\n\n        self._gScan = CSScan(self, self._waypoint_generator,\n                             self._period_generator, moveables, env,\n                             constrains, extrainfodesc)\n        self._gScan.frozen_motors = [m2]\n\n        # _data is the default member where the Macro class stores the data.\n        # Assign the data produced by GScan (or its subclasses) to it so all\n        # the Macro infrastructure related to the data works e.g. 
getter,\n # property, etc.\n self.setData(self._gScan.data)\n\n def _waypoint_generator(self):\n step = {}\n step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n step[\"post-move-hooks\"] = self.getHooks('post-move')\n step[\"check_func\"] = []\n step[\"slow_down\"] = self.slow_down\n points2 = self.m2_nr_interv + 1\n m1start, m2start = self.starts\n m1end, m2end = self.finals\n point_no = 1\n for i, m2pos in enumerate(numpy.linspace(m2start, m2end, points2)):\n start, end = m1start, m1end\n if i % 2 != 0 and self.bidirectional_mode:\n start, end = m1end, m1start\n step[\"start_positions\"] = numpy.array([start, m2pos])\n step[\"positions\"] = numpy.array([end, m2pos])\n step[\"point_id\"] = point_no\n point_no += 1\n yield step\n\n def _period_generator(self):\n step = {}\n step[\"integ_time\"] = self.integ_time\n step[\"pre-acq-hooks\"] = self.getHooks('pre-acq')\n step[\"post-acq-hooks\"] = (self.getHooks('post-acq') +\n self.getHooks('_NOHINTS_'))\n step[\"post-step-hooks\"] = self.getHooks('post-step')\n step[\"check_func\"] = []\n step['extrainfo'] = {}\n point_no = 0\n while(True):\n point_no += 1\n step[\"point_id\"] = point_no\n yield step\n\n def run(self, *args):\n for step in self._gScan.step_scan():\n yield step\n\n def getTimeEstimation(self):\n return self._gScan.waypoint_estimation()\n\n def getIntervalEstimation(self):\n return self.nr_waypoints\n\n\nclass dmeshc(meshc):\n \"\"\"2d relative continuous grid scan.\n The relative mesh scan traces out a grid using motor1 and motor2.\n If first motor is at the position X before the scan begins, it will\n be continuously scanned from X+m1_start_pos to X+m1_final_pos.\n If the second motor is at the position Y before the scan begins,\n it will be discrete scanned from Y+m2_start_pos to Y+m2_final_pos\n using the specified m2_nr_interv number of intervals.\n The scan considers the accel. and decel. 
times of the motor1, so the\n counts (for the integ_time seconds or monitor counts,\n if integ_time is negative) are executed while motor1 is moving\n with the constant velocity.\n Upon scan completion, it returns the motors to their original positions.\n \"\"\"\n\n hints = copy.deepcopy(meshc.hints)\n hints['scan'] = 'dmeshc'\n\n env = copy.deepcopy(meshc.env)\n\n param_def = [\n ['motor1', Type.Moveable, None, 'First motor to move'],\n ['m1_start_pos', Type.Float, None, 'Scan start position for first '\n 'motor'],\n ['m1_final_pos', Type.Float, None, 'Scan final position for first '\n 'motor'],\n ['slow_down', Type.Float, None, 'global scan slow down factor (0, 1]'],\n ['motor2', Type.Moveable, None, 'Second motor to move'],\n ['m2_start_pos', Type.Float, None, 'Scan start position for second '\n 'motor'],\n ['m2_final_pos', Type.Float, None, 'Scan final position for second '\n 'motor'],\n ['m2_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['bidirectional', Type.Boolean, False, 'Save time by scanning '\n 's-shaped']\n ]\n\n def prepare(self, m1, m1_start_pos, m1_final_pos, slow_down,\n m2, m2_start_pos, m2_final_pos, m2_nr_interv, integ_time,\n bidirectional, **opts):\n self._motion = self.getMotion([m1, m2])\n self.originalPositions = numpy.array(\n self._motion.readPosition(force=True))\n start1 = self.originalPositions[0] + m1_start_pos\n start2 = self.originalPositions[1] + m2_start_pos\n final1 = self.originalPositions[0] + m1_final_pos\n final2 = self.originalPositions[1] + m2_final_pos\n meshc.prepare(self, m1, start1, final1, slow_down,\n m2, start2, final2, m2_nr_interv, integ_time,\n bidirectional, **opts)\n\n def do_restore(self):\n self.info(\"Returning to start positions...\")\n self._motion.move(self.originalPositions)\n\n\nclass aNscanct(aNscan):\n \"\"\"N-dimensional continuous scan. This is **not** meant to be called by\n the user, but as a generic base to construct ascanct, a2scanct, a3scanct,\n ...\"\"\"\n\n hints = {\"scan\": \"aNscanct\",\n \"allowsHooks\": (\"pre-scan\", \"pre-configuration\",\n \"post-configuration\", \"pre-move\",\n \"post-move\", \"pre-acq\", \"pre-start\",\n \"post-acq\", \"pre-cleanup\", \"post-cleanup\",\n \"post-scan\")}\n\n\nclass ascanct(aNscanct, Macro):\n \"\"\"Do an absolute continuous scan of the specified motor.\n ascanct scans one motor, as specified by motor. The motor starts before the\n position given by start_pos in order to reach the constant velocity at the\n start_pos and finishes at the position after the final_pos in order to\n maintain the constant velocity until the final_pos.\"\"\"\n\n param_def = [['motor', Type.Moveable, None, 'Moveable name'],\n ['start_pos', Type.Float, None, 'Scan start position'],\n ['final_pos', Type.Float, None, 'Scan final position'],\n ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['latency_time', Type.Float, 0, 'Latency time']]\n\n def prepare(self, motor, start_pos, final_pos, nr_interv,\n integ_time, latency_time, **opts):\n self._prepare([motor], [start_pos], [final_pos], nr_interv,\n integ_time, mode=ContinuousHwTimeMode,\n latency_time=latency_time, **opts)\n\n\nclass a2scanct(aNscanct, Macro):\n \"\"\"Two-motor continuous scan.\n a2scanct scans two motors, as specified by motor1 and motor2. 
Each motor\n    starts before the position given by its start_pos in order to reach the\n    constant velocity at its start_pos and finishes at the position after\n    its final_pos in order to maintain the constant velocity until its\n    final_pos.\"\"\"\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['latency_time', Type.Float, 0, 'Latency time']]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, nr_interv,\n                integ_time, latency_time, **opts):\n        self._prepare([m1, m2], [s1, s2], [f1, f2], nr_interv,\n                      integ_time, mode=ContinuousHwTimeMode,\n                      latency_time=latency_time, **opts)\n\n\nclass a3scanct(aNscanct, Macro):\n    \"\"\"Three-motor continuous scan.\n    a3scanct scans three motors, as specified by motor1, motor2 and motor3.\n    Each motor starts before the position given by its start_pos in order to\n    reach the constant velocity at its start_pos and finishes at the position\n    after its final_pos in order to maintain the constant velocity until its\n    final_pos.\"\"\"\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['latency_time', Type.Float, 0, 'Latency time']]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, nr_interv,\n                integ_time, latency_time, **opts):\n        self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3], nr_interv,\n                      integ_time, mode=ContinuousHwTimeMode,\n                      latency_time=latency_time, **opts)\n\n\nclass a4scanct(aNscanct, Macro):\n    \"\"\"Four-motor continuous scan.\n    a4scanct scans four motors, as specified by motor1, motor2, motor3 and\n    motor4. 
Each motor starts before the position given by its start_pos in\n    order to reach the constant velocity at its start_pos and finishes at the\n    position after its final_pos in order to maintain the constant velocity\n    until its final_pos.\"\"\"\n\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        ['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['latency_time', Type.Float, 0, 'Latency time']]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                nr_interv, integ_time, latency_time, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [f1, f2, f3, f4],\n                      nr_interv, integ_time, mode=ContinuousHwTimeMode,\n                      latency_time=latency_time, **opts)\n\n\nclass dNscanct(dNscan):\n    \"\"\"N-dimensional continuous scan. This is **not** meant to be called by\n    the user, but as a generic base to construct dscanct, d2scanct, d3scanct,\n    ...\"\"\"\n\n    hints = {\"scan\": \"dNscanct\",\n             \"allowsHooks\": (\"pre-scan\", \"pre-configuration\",\n                             \"post-configuration\", \"pre-move\",\n                             \"post-move\", \"pre-acq\", \"pre-start\",\n                             \"post-acq\", \"pre-cleanup\", \"post-cleanup\",\n                             \"post-scan\")}\n\n\nclass dscanct(dNscanct, Macro):\n    \"\"\"Do a relative continuous motor scan.\n    dscanct scans one motor, as specified by motor.\n    The motor starts before the position given by its start_pos in order to\n    reach the constant velocity at its start_pos and finishes at the position\n    after its final_pos in order to maintain the constant velocity until its\n    final_pos.\"\"\"\n\n    param_def = [['motor', Type.Moveable, None, 'Moveable name'],\n                 ['start_pos', Type.Float, None, 'Scan start position'],\n                 ['final_pos', Type.Float, None, 'Scan final position'],\n                 ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n                 ['integ_time', Type.Float, None, 'Integration time'],\n                 ['latency_time', Type.Float, 0, 'Latency time']]\n\n    def prepare(self, motor, start_pos, final_pos, nr_interv,\n                integ_time, latency_time, **opts):\n        self._prepare([motor], [start_pos], [final_pos], nr_interv,\n                      integ_time, mode=ContinuousHwTimeMode,\n                      latency_time=latency_time, **opts)\n\n\nclass d2scanct(dNscanct, Macro):\n    \"\"\"continuous two-motor scan relative to the starting positions,\n    d2scanct scans two motors, as specified by motor1 and motor2.\n    Each motor starts before the position given by its start_pos in order to\n    reach the constant velocity at its start_pos and finishes at the position\n    after its final_pos in order to maintain the constant velocity until its\n    final_pos.\n    \"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan 
start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, integ_time, slow_down, **opts):\n        self._prepare([m1, m2], [s1, s2], [f1, f2], slow_down, integ_time,\n                      mode=ContinuousHwTimeMode, **opts)\n\n\nclass d3scanct(dNscanct, Macro):\n    \"\"\"continuous three-motor scan relative to the starting positions,\n    d3scanct scans three motors, as specified by motor1, motor2 and motor3.\n    Each motor starts before the position given by its start_pos in order to\n    reach the constant velocity at its start_pos and finishes at the position\n    after its final_pos in order to maintain the constant velocity until its\n    final_pos.\n    \"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, integ_time,\n                slow_down, **opts):\n        self._prepare([m1, m2, m3], [s1, s2, s3], [f1, f2, f3], slow_down,\n                      integ_time, mode=ContinuousHwTimeMode, **opts)\n\n\nclass d4scanct(dNscanct, Macro):\n    \"\"\"continuous four-motor scan relative to the starting positions,\n    d4scanct scans four motors, as specified by motor1, motor2, motor3 and\n    motor4.\n    Each motor starts before the position given by its start_pos in order to\n    reach the constant velocity at its start_pos and finishes at the position\n    after its final_pos in order to maintain the constant velocity until its\n    final_pos.\"\"\"\n    param_def = [\n        ['motor1', Type.Moveable, None, 'Moveable 1 to move'],\n        ['start_pos1', Type.Float, None, 'Scan start position 1'],\n        ['final_pos1', Type.Float, None, 'Scan final position 1'],\n        ['motor2', Type.Moveable, None, 'Moveable 2 to move'],\n        ['start_pos2', Type.Float, None, 'Scan start position 2'],\n        ['final_pos2', Type.Float, None, 'Scan final position 2'],\n        ['motor3', Type.Moveable, None, 'Moveable 3 to move'],\n        ['start_pos3', Type.Float, None, 'Scan start position 3'],\n        ['final_pos3', Type.Float, None, 'Scan final position 3'],\n        ['motor4', Type.Moveable, None, 'Moveable 4 to move'],\n        ['start_pos4', Type.Float, None, 'Scan start position 4'],\n        ['final_pos4', Type.Float, None, 'Scan final position 4'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['slow_down', Type.Float, 1, 'global scan slow down factor (0, 1]'],\n    ]\n\n    def prepare(self, m1, s1, f1, m2, s2, f2, m3, s3, f3, m4, s4, f4,\n                integ_time, slow_down, **opts):\n        self._prepare([m1, m2, m3, m4], [s1, s2, s3, s4], [f1, f2, f3, f4],\n                      slow_down, integ_time, mode=ContinuousHwTimeMode, **opts)\n\n\nclass meshct(Macro, Hookable):\n    \"\"\"2d grid scan.\n    The mesh scan traces out a grid using motor1 and motor2.\n    The first motor scans in continuous mode from m1_start_pos to m1_final_pos\n    using the specified number of intervals. 
The second motor similarly\n scans from m2_start_pos to m2_final_pos but it does not move during the\n continuous scan. Each point is counted for integ_time seconds\n (or monitor counts, if integ_time is negative).\n The scan of motor1 is done at each point scanned by motor2. That is, the\n first motor scan is nested within the second motor scan.\n \"\"\"\n\n hints = {\"scan\": \"meshct\",\n \"allowsHooks\": (\"pre-scan\", \"pre-configuration\",\n \"post-configuration\", \"pre-move\",\n \"post-move\", \"pre-acq\", \"pre-start\",\n \"post-acq\", \"pre-cleanup\", \"post-cleanup\",\n \"post-scan\")}\n env = ('ActiveMntGrp',)\n\n param_def = [\n ['motor1', Type.Moveable, None, 'First motor to move'],\n ['m1_start_pos', Type.Float, None, 'Scan start position for first '\n 'motor'],\n ['m1_final_pos', Type.Float, None, 'Scan final position for first '\n 'motor'],\n ['m1_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['motor2', Type.Moveable, None, 'Second motor to move'],\n ['m2_start_pos', Type.Float, None, 'Scan start position for second '\n 'motor'],\n ['m2_final_pos', Type.Float, None, 'Scan final position for second '\n 'motor'],\n ['m2_nr_interv', Type.Integer, None, 'Number of scan intervals'],\n ['integ_time', Type.Float, None, 'Integration time'],\n ['bidirectional', Type.Boolean, False, 'Save time by scanning '\n 's-shaped'],\n ['latency_time', Type.Float, 0, 'Latency time']\n ]\n\n def prepare(self, m1, m1_start_pos, m1_final_pos, m1_nr_interv,\n m2, m2_start_pos, m2_final_pos, m2_nr_interv, integ_time,\n bidirectional, latency_time, **opts):\n\n self.motors = [m1, m2]\n self.starts = numpy.array([m1_start_pos, m2_start_pos], dtype='d')\n self.finals = numpy.array([m1_final_pos, m2_final_pos], dtype='d')\n self.nr_intervs = numpy.array([m1_nr_interv, m2_nr_interv], dtype='i')\n\n # Number of intervals of the first motor which is doing the\n # continuous scan.\n self.nr_interv = m1_nr_interv\n self.nb_points = self.nr_interv + 1\n self.integ_time = integ_time\n self.bidirectional_mode = bidirectional\n\n # Prepare the waypoints\n m1start, m2start = self.starts\n m1end, m2end = self.finals\n points1, points2 = self.nr_intervs + 1\n\n m2_space = numpy.linspace(m2start, m2end, points2)\n self.waypoints = []\n self.starts_points = []\n for i, m2pos in enumerate(m2_space):\n self.starts_points.append(numpy.array([m1start, m2pos], dtype='d'))\n self.waypoints.append(numpy.array([m1end, m2pos], dtype='d'))\n if self.bidirectional_mode:\n m1start, m1end = m1end, m1start\n\n self.name = opts.get('name', 'meshct')\n\n moveables = []\n for m, start, final in zip(self.motors, self.starts, self.finals):\n moveables.append(MoveableDesc(moveable=m, min_value=min(\n start, final), max_value=max(start, final)))\n moveables[0].is_reference = True\n\n env = opts.get('env', {})\n mg_name = self.getEnv('ActiveMntGrp')\n mg = self.getMeasurementGroup(mg_name)\n mg_latency_time = mg.getLatencyTime()\n if mg_latency_time > latency_time:\n self.info(\"Choosing measurement group latency time: %f\" %\n mg_latency_time)\n latency_time = mg_latency_time\n\n self.latency_time = latency_time\n\n constrains = [getCallable(cns) for cns in opts.get('constrains',\n [UNCONSTRAINED])]\n\n extrainfodesc = opts.get('extrainfodesc', [])\n\n # Hooks are not always set at this point. 
We will call getHooks\n        # later on in the scan_loop\n        # self.pre_scan_hooks = self.getHooks('pre-scan')\n        # self.post_scan_hooks = self.getHooks('post-scan')\n\n        self._gScan = CTScan(self, self._generator, moveables, env, constrains,\n                             extrainfodesc)\n        # _data is the default member where the Macro class stores the data.\n        # Assign the data produced by GScan (or its subclasses) to it so all\n        # the Macro infrastructure related to the data works e.g. getter,\n        # property, etc.\n        self.setData(self._gScan.data)\n\n    def _generator(self):\n        moveables_trees = self._gScan.get_moveables_trees()\n        step = {}\n        step[\"pre-move-hooks\"] = self.getHooks('pre-move')\n        post_move_hooks = self.getHooks(\n            'post-move') + [self._fill_missing_records]\n        step[\"post-move-hooks\"] = post_move_hooks\n        step[\"check_func\"] = []\n        step[\"active_time\"] = self.nb_points * (self.integ_time\n                                                + self.latency_time)\n\n        points1, _ = self.nr_intervs + 1\n        for i, waypoint in enumerate(self.waypoints):\n            self.point_id = points1 * i\n            step[\"waypoint_id\"] = i\n            self.starts = self.starts_points[i]\n            self.finals = waypoint\n            step[\"positions\"] = []\n            step[\"start_positions\"] = []\n\n            for start, end, moveable_tree in zip(self.starts, self.finals,\n                                                 moveables_trees):\n                moveable_root = moveable_tree.root()\n                start_positions, end_positions = _calculate_positions(\n                    moveable_root, start, end)\n                step[\"start_positions\"] += start_positions\n                step[\"positions\"] += end_positions\n\n            yield step\n\n    def run(self, *args):\n        for step in self._gScan.step_scan():\n            yield step\n\n    def getTimeEstimation(self):\n        return 0.0\n\n    def getIntervalEstimation(self):\n        return len(self.waypoints)\n\n    def _fill_missing_records(self):\n        # fill record list with dummy records for the final padding\n        nb_of_points = self.nb_points\n        scan = self._gScan\n        nb_of_total_records = len(scan.data.records)\n        nb_of_records = nb_of_total_records - self.point_id\n        missing_records = nb_of_points - nb_of_records\n        scan.data.initRecords(missing_records)\n\n    def _get_nr_points(self):\n        msg = (\"nr_points is deprecated since version 3.0.3. \"\n               \"Use nb_points instead.\")\n        self.warning(msg)\n        return self.nb_points\n\n    nr_points = property(_get_nr_points)\n\n\nclass timescan(Macro, Hookable):\n    \"\"\"Do a time scan over the specified time intervals. The scan starts\n    immediately. The number of data points collected will be nr_interv + 1.\n    Count time is given by integ_time. Latency time will be the longer of\n    latency_time and the measurement group latency time.\n    \"\"\"\n\n    hints = {'scan': 'timescan', 'allowsHooks': ('pre-scan', 'pre-acq',\n                                                 'post-acq', 'post-scan')}\n\n    param_def = [\n        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],\n        ['integ_time', Type.Float, None, 'Integration time'],\n        ['latency_time', Type.Float, 0, 'Latency time']]\n\n    def prepare(self, nr_interv, integ_time, latency_time):\n        self.nr_interv = nr_interv\n        self.nb_points = nr_interv + 1\n        self.integ_time = integ_time\n        self.latency_time = latency_time\n        self._gScan = TScan(self)\n\n        # _data is the default member where the Macro class stores the data.\n        # Assign the data produced by GScan (or its subclasses) to it so all\n        # the Macro infrastructure related to the data works e.g. 
getter,\n        # property, etc.\n        self.setData(self._gScan.data)\n\n    def run(self, *args):\n        for step in self._gScan.step_scan():\n            yield step\n\n    def getTimeEstimation(self):\n        mg_latency_time = self._gScan.measurement_group.getLatencyTime()\n        latency_time = max(self.latency_time, mg_latency_time)\n        return self.nb_points * (self.integ_time + latency_time)\n\n    def getIntervalEstimation(self):\n        return self.nr_interv\n\n    def _get_nr_points(self):\n        msg = (\"nr_points is deprecated since version 3.0.3. \"\n               \"Use nb_points instead.\")\n        self.warning(msg)\n        return self.nb_points\n\n    nr_points = property(_get_nr_points)\n\n\nclass scanstats(Macro):\n    \"\"\"Calculate basic statistics of the enabled and plotted channels in\n    the active measurement group for the last scan. If no channel is selected\n    for plotting it falls back to the first enabled channel. Prints the stats\n    and publishes them in the environment.\n    The macro must be attached as a post-scan hook.\n    \"\"\"\n\n    env = (\"ActiveMntGrp\", )\n\n    param_def = [\n        [\"channel\",\n         [[\"channel\", Type.ExpChannel, None, \"\"], {\"min\": 0}],\n         None,\n         \"List of channels for statistics calculations\"\n         ]\n    ]\n\n    def run(self, channel):\n        parent = self.getParentMacro()\n        if not parent:\n            self.warning(\"for now the scanstats macro can only be executed as\"\n                         \" a post-scan hook\")\n            return\n        if not hasattr(parent, \"motors\"):\n            self.warning(\"scan must involve at least one moveable \"\n                         \"to calculate statistics\")\n            return\n\n        active_meas_grp = self.getEnv(\"ActiveMntGrp\")\n        meas_grp = self.getMeasurementGroup(active_meas_grp)\n        calc_channels = []\n        enabled_channels = meas_grp.getEnabled()\n        if channel:\n            stat_channels = [chan.name for chan in channel]\n        else:\n            stat_channels = [key for key in enabled_channels.keys()]\n\n        for chan in stat_channels:\n            enabled = enabled_channels.get(chan)\n            if enabled is None:\n                self.warning(\"{} not in {}\".format(chan, meas_grp.name))\n            else:\n                if not enabled and channel:\n                    self.warning(\"{} not enabled\".format(chan))\n                elif enabled and channel:\n                    # channel was given as parameters\n                    calc_channels.append(chan)\n                elif enabled and meas_grp.getPlotType(chan)[chan] == 1:\n                    calc_channels.append(chan)\n\n        if len(calc_channels) == 0:\n            # fallback is first enabled channel in meas_grp\n            calc_channels.append(next(iter(enabled_channels)))\n\n        scalar_channels = []\n        for _, chan in self.getExpChannels().items():\n            if chan.type in (\"OneDExpChannel\", \"TwoDExpChannel\"):\n                continue\n            scalar_channels.append(chan.name)\n        calc_channels = [ch for ch in calc_channels if ch in scalar_channels]\n\n        if len(calc_channels) == 0:\n            self.warning(\"measurement group must contain at least one \"\n                         \"enabled scalar channel to calculate statistics\")\n            return\n\n        selected_motor = str(parent.motors[0])\n        stats = {}\n        col_header = []\n        cols = []\n\n        motor_data = []\n        channels_data = {}\n        for channel_name in calc_channels:\n            channels_data[channel_name] = []\n\n        for idx, rc in parent.data.items():\n            motor_data.append(rc[selected_motor])\n            for channel_name in calc_channels:\n                channels_data[channel_name].append(rc[channel_name])\n\n        motor_data = numpy.array(motor_data)\n        for channel_name, data in channels_data.items():\n            channel_data = numpy.array(data)\n\n            (_min, _max, min_at, max_at, half_max, com, mean, _int,\n             fwhm, cen) = self._calcStats(motor_data, channel_data)\n            stats[channel_name] = {\n                \"min\": _min,\n                \"max\": _max,\n                \"minpos\": min_at,\n                \"maxpos\": max_at,\n                \"mean\": mean,\n                \"int\": _int,\n                \"com\": com,\n                \"fwhm\": fwhm,\n                \"cen\": cen}\n\n            
col_header.append([channel_name])\n cols.append([\n stats[channel_name][\"min\"],\n stats[channel_name][\"max\"],\n stats[channel_name][\"minpos\"],\n stats[channel_name][\"maxpos\"],\n stats[channel_name][\"mean\"],\n stats[channel_name][\"int\"],\n stats[channel_name][\"com\"],\n stats[channel_name][\"fwhm\"],\n stats[channel_name][\"cen\"],\n ])\n self.info(\"Statistics for movable: {:s}\".format(selected_motor))\n\n table = Table(elem_list=cols, elem_fmt=[\"%*g\"],\n row_head_str=[\"MIN\", \"MAX\", \"MIN@\", \"MAX@\",\n \"MEAN\", \"INT\", \"COM\", \"FWHM\", \"CEN\"],\n col_head_str=col_header, col_head_sep=\"-\")\n out = table.genOutput()\n\n for line in out:\n self.info(line)\n self.setEnv(\"{:s}.ScanStats\".format(self.getDoorName()),\n {\"Stats\": stats,\n \"Motor\": selected_motor,\n \"ScanID\": self.getEnv(\"ScanID\")})\n\n @staticmethod\n def _calcStats(x, y):\n # max and min\n _min = numpy.min(y)\n _max = numpy.max(y)\n\n min_idx = numpy.argmin(y)\n min_at = x[min_idx]\n max_idx = numpy.argmax(y)\n max_at = x[max_idx]\n\n # center of mass (com)\n try:\n com = numpy.sum(y*x)/numpy.sum(y)\n except ZeroDivisionError:\n com = 0\n\n mean = numpy.mean(y)\n _int = numpy.sum(y)\n\n # determine if it is a peak- or erf-like function\n half_max = (_max-_min)/2+_min\n\n lower_left = False\n lower_right = False\n\n if numpy.any(y[0:max_idx] < half_max):\n lower_left = True\n if numpy.any(y[max_idx:] < half_max):\n lower_right = True\n\n if lower_left and lower_right:\n # it is a peak-like function\n y_data = y\n elif lower_left:\n # it is an erf-like function\n # use the gradient for further calculation\n y_data = numpy.gradient(y)\n # use also the half maximum of the gradient\n half_max = (numpy.max(y_data)-numpy.min(y_data)) \\\n / 2+numpy.min(y_data)\n else:\n # it is an erf-like function\n # use the gradient for further calculation\n y_data = -1*numpy.gradient(y)\n # use also the half maximum of the gradient\n half_max = (numpy.max(y_data)-numpy.min(y_data)) \\\n / 2+numpy.min(y_data)\n\n # cen and fwhm\n # this part is adapted from:\n #\n # The PyMca X-Ray Fluorescence Toolkit\n #\n # Copyright (c) 2004-2014 European Synchrotron Radiation Facility\n #\n # This file is part of the PyMca X-ray Fluorescence Toolkit developed\n # at the ESRF by the Software group.\n\n max_idx_data = numpy.argmax(y_data)\n idx = max_idx_data\n try:\n while y_data[idx] >= half_max:\n idx = idx-1\n\n x0 = x[idx]\n x1 = x[idx+1]\n y0 = y_data[idx]\n y1 = y_data[idx+1]\n\n lhmx = (half_max*(x1-x0) - (y0*x1)+(y1*x0)) / (y1-y0)\n except ZeroDivisionError:\n lhmx = 0\n except IndexError:\n lhmx = x[0]\n\n idx = max_idx_data\n try:\n while y_data[idx] >= half_max:\n idx = idx+1\n\n x0 = x[idx-1]\n x1 = x[idx]\n y0 = y_data[idx-1]\n y1 = y_data[idx]\n\n uhmx = (half_max*(x1-x0) - (y0*x1)+(y1*x0)) / (y1-y0)\n except ZeroDivisionError:\n uhmx = 0\n except IndexError:\n uhmx = x[-1]\n\n fwhm = uhmx - lhmx\n cen = (uhmx + lhmx)/2\n\n return (_min, _max, min_at, max_at, half_max, com, mean, _int,\n fwhm, cen)\n"
] | [
[
"numpy.gradient",
"numpy.sum",
"numpy.ones",
"numpy.argmin",
"numpy.any",
"numpy.argmax",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.linspace",
"numpy.mean"
]
] |
rdarie/kCSD-python | [
"4dd0015e9c5598e7eceeeb25668e696e495b2026"
] | [
"figures/kCSD_properties/targeted_basis.py"
] | [
"\"\"\"\n@author: mkowalska\n\"\"\"\nimport os\nfrom os.path import expanduser\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport time\n\nfrom kcsd import ValidateKCSD, ValidateKCSD1D, SpectralStructure, KCSD1D\n\n__abs_file__ = os.path.abspath(__file__)\nhome = expanduser('~')\nDAY = datetime.datetime.now()\nDAY = DAY.strftime('%Y%m%d')\nTIMESTR = time.strftime(\"%H%M%S\")\nSAVE_PATH = home + \"/kCSD_results/\" + DAY + '/' + TIMESTR\n\n\ndef makemydir(directory):\n \"\"\"\n Creates a new folder if it doesn't exist\n\n Parameters\n ----------\n directory: string\n directory\n\n Returns\n -------\n None\n \"\"\"\n try:\n os.makedirs(directory)\n except OSError:\n pass\n os.chdir(directory)\n\n\ndef save_source_code(save_path, timestr):\n \"\"\"\n Saves the source code.\n\n Parameters\n ----------\n save_path: string\n directory\n timestr: float\n\n Returns\n -------\n None\n \"\"\"\n with open(save_path + '/source_code_' + str(timestr), 'w') as sf:\n sf.write(open(__file__).read())\n\n\ndef csd_profile(x, seed):\n '''Function used for adding multiple 1D gaussians.\n\n Parameters\n ----------\n x: numpy array\n x coordinates of true source profile.\n seed: list [r, mu]\n\n Returns\n -------\n gauss: numpy array\n Gaussian profile for given R and M.\n '''\n r = seed[0]\n mu = seed[1]\n STDDEV = r/3.0\n gauss = (np.exp(-((x - mu)**2)/(2 * STDDEV**2)) /\n (np.sqrt(2 * np.pi) * STDDEV)**1)\n gauss /= np.max(gauss)\n return gauss\n\n\ndef targeted_basis(val, csd_at, true_csd, ele_pos, pots, n_src, R, MU,\n true_csd_xlims, ele_lims, title, h=0.25, sigma=0.3,\n csd_res=100, method='cross-validation', Rs=None,\n lambdas=None):\n '''\n Function investigating kCSD analysis for targeted bases.\n\n Parameters\n ----------\n val: object of the class ValidateKCSD.\n csd_at: numpy array\n Coordinates of ground truth data.\n true_csd: numpy array\n Values of ground truth data (true_csd).\n ele_pos: numpy array\n Locations of electrodes.\n pots: numpy array\n Potentials measured (calculated) on electrodes.\n n_src: int\n Number of basis sources.\n R: float\n Thickness of the groundtruth source.\n MU: float\n x coordinate of maximum ampliude of groundtruth source.\n true_csd_xlims: list\n Boundaries for ground truth space.\n ele_lims: list\n Boundaries for electrodes placement.\n title: string\n Name of the figure that is to be saved\n h: float\n Thickness of analyzed cylindrical slice.\n Default: 0.25.\n sigma: float\n Space conductance of the medium.\n Default: 0.3.\n csd_res: int\n Resolution of ground truth.\n Default: 100.\n method: string\n Determines the method of regularization.\n Default: cross-validation.\n Rs: numpy 1D array\n Basis source parameter for crossvalidation.\n Default: None.\n lambdas: numpy 1D array\n Regularization parameter for crossvalidation.\n Default: None.\n\n Returns\n -------\n obj: object of the class KCSD1D\n k: object of the class ValidateKCSD1D\n '''\n k = ValidateKCSD1D(1, n_src_init=n_src, R_init=0.23,\n ele_lims=ele_lims, est_xres=0.01,\n true_csd_xlims=true_csd_xlims, sigma=sigma, h=h,\n src_type='gauss')\n obj, est_csd = k.do_kcsd(pots, ele_pos, method=method, Rs=Rs,\n lambdas=lambdas)\n test_csd = csd_profile(obj.estm_x, [R, MU])\n rms = val.calculate_rms(test_csd, est_csd)\n titl = \"Lambda: %0.2E; R: %0.2f; RMS_Error: %0.2E;\" % (obj.lambd, obj.R,\n rms)\n fig = k.make_plot(csd_at, true_csd, obj, est_csd, ele_pos, pots, titl)\n save_as = (SAVE_PATH)\n fig.savefig(os.path.join(SAVE_PATH, save_as + '/' + title + '.png'))\n plt.close()\n 
return obj, k\n\n\ndef simulate_data(csd_profile, true_csd_xlims, R, MU, total_ele, ele_lims,\n                  h=0.25, sigma=0.3, csd_res=100, noise=0):\n    '''\n    Generates groundtruth profiles and calculates potentials.\n\n    Parameters\n    ----------\n    csd_profile: function\n        Function to produce csd profile.\n    true_csd_xlims: list\n        Boundaries for ground truth space.\n    R: float\n        Thickness of the groundtruth source.\n    MU: float\n        x coordinate of maximum amplitude of groundtruth source.\n    total_ele: int\n        Number of electrodes.\n    ele_lims: list\n        Boundaries for electrodes placement.\n    h: float\n        Thickness of analyzed cylindrical slice.\n        Default: 0.25.\n    sigma: float\n        Space conductance of the medium.\n        Default: 0.3.\n    csd_res: int\n        Resolution of ground truth.\n        Default: 100.\n    noise: float\n        Determines the level of noise in the data.\n        Default: 0.\n\n    Returns\n    -------\n    csd_at: numpy array\n        Coordinates of ground truth data.\n    true_csd: numpy array\n        Values of ground truth data (true_csd).\n    ele_pos: numpy array\n        Locations of electrodes.\n    pots: numpy array\n        Potentials measured (calculated) on electrodes.\n    val: object of the class ValidateKCSD\n    '''\n    val = ValidateKCSD(1)\n    csd_at = np.linspace(true_csd_xlims[0], true_csd_xlims[1], csd_res)\n    true_csd = csd_profile(csd_at, [R, MU])\n    ele_pos = val.generate_electrodes(total_ele=total_ele, ele_lims=ele_lims)\n    pots = val.calculate_potential(true_csd, csd_at, ele_pos, h, sigma)\n    if noise is not None:\n        pots = val.add_noise(pots, 10, level=noise)\n    return csd_at, true_csd, ele_pos, pots, val\n\n\ndef structure_investigation(csd_profile, true_csd_xlims, n_src, R, MU,\n                            total_ele, ele_lims, title, h=0.25, sigma=0.3,\n                            csd_res=100, method='cross-validation', Rs=None,\n                            lambdas=None, noise=0):\n    '''\n    Simulates data and performs the kCSD reconstruction for the given\n    configuration.\n\n    Parameters\n    ----------\n    csd_profile: function\n        Function to produce csd profile.\n    true_csd_xlims: list\n        Boundaries for ground truth space.\n    n_src: int\n        Number of basis sources.\n    R: float\n        Thickness of the groundtruth source.\n    MU: float\n        x coordinate of maximum amplitude of groundtruth source.\n    total_ele: int\n        Number of electrodes.\n    ele_lims: list\n        Boundaries for electrodes placement.\n    title: string\n        Name of the figure that is to be saved.\n    h: float\n        Thickness of analyzed cylindrical slice.\n        Default: 0.25.\n    sigma: float\n        Space conductance of the medium.\n        Default: 0.3.\n    csd_res: int\n        Resolution of ground truth.\n        Default: 100.\n    method: string\n        Determines the method of regularization.\n        Default: cross-validation.\n    Rs: numpy 1D array\n        Basis source parameter for cross-validation.\n        Default: None.\n    lambdas: numpy 1D array\n        Regularization parameter for cross-validation.\n        Default: None.\n    noise: float\n        Determines the level of noise in the data.\n        Default: 0.\n\n    Returns\n    -------\n    obj: object of the class KCSD1D\n    '''\n    val = ValidateKCSD(1)\n    csd_at, true_csd, ele_pos, pots, val = simulate_data(csd_profile,\n                                                         true_csd_xlims, R, MU,\n                                                         total_ele, ele_lims,\n                                                         h=h, sigma=sigma,\n                                                         noise=noise)\n    obj, k = targeted_basis(val, csd_at, true_csd, ele_pos, pots, n_src, R, MU,\n                            true_csd_xlims, ele_lims, title, h=0.25,\n                            sigma=0.3, csd_res=100, method=method, Rs=Rs,\n                            lambdas=lambdas)\n    return obj\n\n\ndef plot_eigenvalues(eigenvalues, save_path, title):\n    '''\n    Creates plot of eigenvalues of kernel matrix (k_pot).\n\n    Parameters\n    ----------\n    eigenvalues: numpy array\n        Eigenvalues of k_pot matrix.\n    save_path: string\n        Directory.\n    title: string\n        Title of the plot.\n\n    Returns\n    -------\n    None\n    '''\n    fig = plt.figure()\n    plt.plot(eigenvalues, 
'--', marker='.')\n    plt.title('Eigenvalue decomposition of kernel matrix. ele_lims=basis_lims')\n    plt.xlabel('Number of components')\n    plt.ylabel('Eigenvalues')\n    plt.show()\n    save_as = (save_path + '/eigenvalues_for_' + title)\n    fig.savefig(os.path.join(save_path, save_as+'.png'))\n    plt.close()\n\n\ndef plot_eigenvectors(eigenvectors, save_path, title):\n    \"\"\"\n    Creates plot of eigenvectors of kernel matrix (k_pot).\n\n    Parameters\n    ----------\n    eigenvectors: numpy array\n        Eigenvectors of k_pot matrix.\n    save_path: string\n        Directory.\n    title: string\n        Title of the plot.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    fig = plt.figure(figsize=(15, 15))\n    plt.suptitle('Eigenvalue decomposition of kernel matrix for different '\n                 'number of basis sources')\n    for i in range(eigenvectors.shape[1]):\n        plt.subplot(int(eigenvectors.shape[1]/2) + 1, 2, i + 1)\n        plt.plot(eigenvectors[:, i].T, '--', marker='.')\n        plt.ylabel('Eigenvectors')\n        plt.title(r'$v_' + str(i + 1) + '$')\n    plt.xlabel('Number of components')\n    plt.tight_layout()\n    plt.show()\n    save_as = (save_path + '/eigenvectors_for_' + title)\n    fig.savefig(os.path.join(save_path, save_as+'.png'))\n    plt.close()\n\n\ndef modified_bases(val, pots, ele_pos, n_src, title=None, h=0.25, sigma=0.3,\n                   gdx=0.01, ext_x=0, xmin=0, xmax=1, R=0.2, MU=0.25,\n                   method='cross-validation', Rs=None, lambdas=None):\n    '''\n    Reconstructs CSD using basis sources restricted to a modified support\n    (xmin, xmax).\n\n    Parameters\n    ----------\n    val: object of the class ValidateKCSD\n    pots: numpy array\n        Potentials measured (calculated) on electrodes.\n    ele_pos: numpy array\n        Locations of electrodes.\n    n_src: int\n        Number of basis sources.\n    title: string\n        Title of the plot.\n    h: float\n        Thickness of analyzed cylindrical slice.\n        Default: 0.25.\n    sigma: float\n        Space conductance of the medium.\n        Default: 0.3.\n    gdx: float\n        Space increments in the estimation space.\n        Default: 0.01.\n    ext_x: float\n        Length of space extension: xmin-ext_x ... 
xmax+ext_x.\n Default: 0.\n xmin: float\n Boundaries for CSD estimation space.\n xmax: float\n boundaries for CSD estimation space.\n R: float\n Thickness of the groundtruth source.\n Default: 0.2.\n MU: float\n Central position of Gaussian source\n Default: 0.25.\n method: string\n Determines the method of regularization.\n Default: cross-validation.\n Rs: numpy 1D array\n Basis source parameter for crossvalidation.\n Default: None.\n lambdas: numpy 1D array\n Regularization parameter for crossvalidation.\n Default: None.\n\n Returns\n -------\n obj_m: object of the class KCSD1D\n '''\n pots = pots.reshape((len(ele_pos), 1))\n obj_m = KCSD1D(ele_pos, pots, src_type='gauss', sigma=sigma, h=h, gdx=gdx,\n n_src_init=n_src, ext_x=ext_x, xmin=xmin, xmax=xmax)\n if method == 'cross-validation':\n obj_m.cross_validate(Rs=Rs, lambdas=lambdas)\n elif method == 'L-curve':\n obj_m.L_curve(Rs=Rs, lambdas=lambdas)\n est_csd = obj_m.values('CSD')\n test_csd = csd_profile(obj_m.estm_x, [R, MU])\n rms = val.calculate_rms(test_csd, est_csd)\n# titl = \"Lambda: %0.2E; R: %0.2f; RMS_Error: %0.2E;\" % (obj_m.lambd,\n# obj_m.R, rms)\n# fig = k.make_plot(csd_at, true_csd, obj_m, est_csd, ele_pos, pots, titl)\n# save_as = (SAVE_PATH)\n# fig.savefig(os.path.join(SAVE_PATH, save_as + '/' + title + '.png'))\n# plt.close()\n# ss = SpectralStructure(obj_m)\n# eigenvectors, eigenvalues = ss.evd()\n return obj_m\n\n\ndef plot_k_interp_cross_v(k_icross, eigenvectors, save_path, title):\n \"\"\"\n Creates plot of product of cross kernel vectors and eigenvectors for\n different number of basis sources\n\n Parameters\n ----------\n k_icross: numpy array\n List of cross kernel matrixes for different number of basis sources.\n eigenvectors: numpy array\n Eigenvectors of k_pot matrix.\n save_path: string\n Directory.\n title: string\n Name of the figure that is to be saved.\n\n Returns\n -------\n None\n \"\"\"\n fig = plt.figure(figsize=(15, 15))\n for i in range(eigenvectors.shape[0]):\n plt.subplot(int(k_icross.shape[1]/2) + 1, 2, i + 1)\n plt.plot(np.dot(k_icross, eigenvectors[:, i]), '--',\n marker='.')\n plt.title(r'$\\tilde{K}*v_' + str(i + 1) + '$')\n# plt.ylabel('Product K~V')\n plt.xlabel('Number of estimation points')\n fig.tight_layout()\n plt.show()\n save_path = save_path + '/cross_kernel'\n makemydir(save_path)\n save_as = (save_path + '/cross_kernel_eigenvector_product' + title)\n fig.savefig(os.path.join(save_path, save_as+'.png'))\n plt.close()\n\n\nif __name__ == '__main__':\n makemydir(SAVE_PATH)\n save_source_code(SAVE_PATH, time.strftime(\"%Y%m%d-%H%M%S\"))\n\n CSD_SEED = 15\n N_SRC = 64\n ELE_LIMS = [0, 1.] 
# range of electrodes space\n TRUE_CSD_XLIMS = [0., 1.]\n TOTAL_ELE = 12\n noise = 0\n method = 'cross-validation'\n Rs = None\n lambdas = None\n\n # A\n R = 0.2\n MU = 0.25\n csd_at, true_csd, ele_pos, pots, val = simulate_data(csd_profile,\n TRUE_CSD_XLIMS, R, MU,\n TOTAL_ELE, ELE_LIMS,\n noise=noise)\n title = 'A_basis_lims_0_1'\n obj, k = targeted_basis(val, csd_at, true_csd, ele_pos, pots, N_SRC, R, MU,\n TRUE_CSD_XLIMS, ELE_LIMS, title, method=method, Rs=Rs,\n lambdas=lambdas)\n ss = SpectralStructure(obj)\n eigenvectors, eigenvalues = ss.evd()\n plot_eigenvalues(eigenvalues, SAVE_PATH, title)\n plot_eigenvectors(eigenvectors, SAVE_PATH, title)\n plot_k_interp_cross_v(obj.k_interp_cross, eigenvectors, SAVE_PATH, title)\n\n # A.2\n title = 'A_basis_lims_0_0_5'\n modified_bases(val, pots, ele_pos, N_SRC, title, h=0.25, sigma=0.3,\n gdx=0.01, ext_x=0, xmin=0, xmax=0.5, method=method, Rs=Rs,\n lambdas=lambdas)\n\n # A.2.b\n title = 'A_basis_lims_0_0_5_less_sources'\n modified_bases(val, pots, ele_pos, N_SRC/2, title, h=0.25, sigma=0.3,\n gdx=0.01, ext_x=0, xmin=0, xmax=0.5, method=method, Rs=Rs,\n lambdas=lambdas)\n\n # B\n TRUE_CSD_XLIMS = [0., 1.5]\n R = 0.2\n MU = 1.25\n csd_at, true_csd, ele_pos, pots, val = simulate_data(csd_profile,\n TRUE_CSD_XLIMS, R, MU,\n TOTAL_ELE, ELE_LIMS,\n noise=noise)\n title = 'B_basis_lims_0_1'\n obj, k = targeted_basis(val, csd_at, true_csd, ele_pos, pots, N_SRC, R, MU,\n TRUE_CSD_XLIMS, ELE_LIMS, title, method=method, Rs=Rs,\n lambdas=lambdas)\n ss = SpectralStructure(obj)\n eigenvectors, eigenvalues = ss.evd()\n plot_eigenvalues(eigenvalues, SAVE_PATH, title)\n plot_eigenvectors(eigenvectors, SAVE_PATH, title)\n plot_k_interp_cross_v(obj.k_interp_cross, eigenvectors, SAVE_PATH, title)\n\n # B.2\n title = 'B_basis_lims_1_1_5'\n modified_bases(val, pots, ele_pos, N_SRC, title, h=0.25, sigma=0.3,\n gdx=0.01, ext_x=0, xmin=1, xmax=1.5, method=method, Rs=Rs,\n lambdas=lambdas)\n\n # B.2.b\n title = 'B_basis_lims_1_1_5_less_sources'\n modified_bases(val, pots, ele_pos, N_SRC/2, title, h=0.25, sigma=0.3,\n gdx=0.01, ext_x=0, xmin=1, xmax=1.5, method=method, Rs=Rs,\n lambdas=lambdas)\n\n # B.3\n title = 'B_basis_lims_0_1_5'\n modified_bases(val, pots, ele_pos, N_SRC, title, h=0.25, sigma=0.3,\n gdx=0.01, ext_x=0, xmin=0, xmax=1.5, method=method, Rs=Rs,\n lambdas=lambdas)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.exp",
"matplotlib.pyplot.title",
"numpy.max",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"matplotlib.pyplot.suptitle",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
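The row above pairs the KCSD analysis script (the `code` column) with the fully-qualified library calls it makes (the `apis` column), e.g. `numpy.dot` for the `np.dot(...)` call inside `plot_k_interp_cross_v`. As a rough sketch of how such a column can be derived from a code string — an illustration, not the dataset's actual extraction pipeline — the dotted call targets can be collected with Python's `ast` module; note this sketch leaves aliases like `np` unresolved, whereas the list above evidently maps them back to `numpy` and `matplotlib.pyplot`:

```python
import ast

def extract_call_names(source: str) -> set:
    """Collect dotted call targets such as 'np.dot' from Python source.

    Sketch only: walks the AST for Call nodes and joins plain
    Name/Attribute chains; import aliases are not resolved, so this
    yields 'np.dot' where the apis column above lists 'numpy.dot'.
    """
    def dotted(node):
        parts = []
        while isinstance(node, ast.Attribute):
            parts.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            parts.append(node.id)
            return ".".join(reversed(parts))
        return None  # calls on subscripts or on call results are skipped

    calls = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                calls.add(name)
    return calls

print(extract_call_names("import numpy as np\nnp.dot(a, b)"))  # {'np.dot'}
```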
chanyh0/PyTorch-StudioGAN | [
"5a912affc1ec975d97a33a12d1c96d05d4b883f0"
] | [
"src/train_eval.py"
] | [
"# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN\n# The MIT License (MIT)\n# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details\n\n# train_eval.py\n\n\nimport numpy as np\nimport sys\nimport glob\nfrom scipy import ndimage\nfrom os.path import join\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nfrom metrics.IS import calculate_incep_score\nfrom metrics.FID import calculate_fid_score\nfrom metrics.F_beta import calculate_f_beta_score\nfrom metrics.Accuracy import calculate_accuracy\nfrom utils.ada import augment\nfrom utils.biggan_utils import interp\nfrom utils.sample import sample_latents, sample_1hot, make_mask, target_class_sampler\nfrom utils.misc import *\nfrom utils.losses import calc_derv4gp, calc_derv4dra, calc_derv, latent_optimise\nfrom utils.losses import Conditional_Contrastive_loss, Proxy_NCA_loss, NT_Xent_loss\nfrom utils.diff_aug import DiffAugment\nfrom utils.cr_diff_aug import CR_DiffAug\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import DataParallel\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import transforms\n\n\nSAVE_FORMAT = 'step={step:0>3}-Inception_mean={Inception_mean:<.4}-Inception_std={Inception_std:<.4}-FID={FID:<.5}.pth'\n\nLOG_FORMAT = (\n \"Step: {step:>7} \"\n \"Progress: {progress:<.1%} \"\n \"Elapsed: {elapsed} \"\n \"temperature: {temperature:<.6} \"\n \"ada_p: {ada_p:<.6} \"\n \"Discriminator_loss: {dis_loss:<.6} \"\n \"Generator_loss: {gen_loss:<.6} \"\n)\n\n\ndef set_temperature(conditional_strategy, tempering_type, start_temperature, end_temperature, step_count, tempering_step, total_step):\n if conditional_strategy == 'ContraGAN':\n if tempering_type == 'continuous':\n t = start_temperature + step_count*(end_temperature - start_temperature)/total_step\n elif tempering_type == 'discrete':\n tempering_interval = total_step//(tempering_step + 1)\n t = start_temperature + \\\n (step_count//tempering_interval)*(end_temperature-start_temperature)/tempering_step\n else:\n t = start_temperature\n else:\n t = 'no'\n return t\n\n\nclass Train_Eval(object):\n def __init__(self, run_name, best_step, dataset_name, eval_type, logger, writer, n_gpus, gen_model, dis_model, inception_model,\n Gen_copy, Gen_ema, train_dataset, eval_dataset, train_dataloader, eval_dataloader, freeze_layers, conditional_strategy,\n pos_collected_numerator, z_dim, num_classes, hypersphere_dim, d_spectral_norm, g_spectral_norm, G_optimizer, D_optimizer,\n batch_size, g_steps_per_iter, d_steps_per_iter, accumulation_steps, total_step, G_loss, D_loss, contrastive_lambda, margin,\n tempering_type, tempering_step, start_temperature, end_temperature, weight_clipping_for_dis, weight_clipping_bound,\n gradient_penalty_for_dis, gradient_penalty_lambda, deep_regret_analysis_for_dis, regret_penalty_lambda, cr, cr_lambda, bcr,\n real_lambda, fake_lambda, zcr, gen_lambda, dis_lambda, sigma_noise, diff_aug, ada, prev_ada_p, ada_target, ada_length, prior,\n truncated_factor, ema, latent_op, latent_op_rate, latent_op_step, latent_op_step4eval, latent_op_alpha, latent_op_beta,\n latent_norm_reg_weight, default_device, print_every, save_every, checkpoint_dir, evaluate, mu, sigma, best_fid,\n best_fid_checkpoint_path, mixed_precision, train_config, model_config, gamma, steps):\n\n self.run_name = run_name\n self.best_step = best_step\n self.dataset_name = dataset_name\n self.eval_type = eval_type\n self.logger = logger\n self.writer = writer\n self.n_gpus = n_gpus\n\n 
self.gen_model = gen_model\n self.dis_model = dis_model\n self.inception_model = inception_model\n self.Gen_copy = Gen_copy\n self.Gen_ema = Gen_ema\n\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.train_dataloader = train_dataloader\n self.eval_dataloader = eval_dataloader\n\n self.freeze_layers = freeze_layers\n\n self.conditional_strategy = conditional_strategy\n self.pos_collected_numerator = pos_collected_numerator\n self.z_dim = z_dim\n self.num_classes = num_classes\n self.hypersphere_dim = hypersphere_dim\n self.d_spectral_norm = d_spectral_norm\n self.g_spectral_norm = g_spectral_norm\n\n self.G_optimizer = G_optimizer\n self.D_optimizer = D_optimizer\n self.batch_size = batch_size\n self.g_steps_per_iter = g_steps_per_iter\n self.d_steps_per_iter = d_steps_per_iter\n self.accumulation_steps = accumulation_steps\n self.total_step = total_step\n\n self.G_loss = G_loss\n self.D_loss = D_loss\n self.contrastive_lambda = contrastive_lambda\n self.margin = margin\n self.tempering_type = tempering_type\n self.tempering_step = tempering_step\n self.start_temperature = start_temperature\n self.end_temperature = end_temperature\n self.weight_clipping_for_dis = weight_clipping_for_dis\n self.weight_clipping_bound = weight_clipping_bound\n self.gradient_penalty_for_dis = gradient_penalty_for_dis\n self.gradient_penalty_lambda = gradient_penalty_lambda\n self.deep_regret_analysis_for_dis = deep_regret_analysis_for_dis\n self.regret_penalty_lambda = regret_penalty_lambda\n self.cr = cr\n self.cr_lambda = cr_lambda\n self.bcr = bcr\n self.real_lambda = real_lambda\n self.fake_lambda = fake_lambda\n self.zcr = zcr\n self.gen_lambda = gen_lambda\n self.dis_lambda = dis_lambda\n self.sigma_noise = sigma_noise\n\n self.diff_aug = diff_aug\n self.ada = ada\n self.prev_ada_p = prev_ada_p\n self.ada_target = ada_target\n self.ada_length = ada_length\n self.prior = prior\n self.truncated_factor = truncated_factor\n self.ema = ema\n self.latent_op = latent_op\n self.latent_op_rate = latent_op_rate\n self.latent_op_step = latent_op_step\n self.latent_op_step4eval = latent_op_step4eval\n self.latent_op_alpha = latent_op_alpha\n self.latent_op_beta = latent_op_beta\n self.latent_norm_reg_weight = latent_norm_reg_weight\n\n self.default_device = default_device\n self.print_every = print_every\n self.save_every = save_every\n self.checkpoint_dir = checkpoint_dir\n self.evaluate = evaluate\n self.mu = mu\n self.sigma = sigma\n self.best_fid = best_fid\n self.best_fid_checkpoint_path = best_fid_checkpoint_path\n self.mixed_precision = mixed_precision\n self.train_config = train_config\n self.model_config = model_config\n\n self.start_time = datetime.now()\n self.l2_loss = torch.nn.MSELoss()\n self.ce_loss = torch.nn.CrossEntropyLoss()\n self.policy = \"color,translation,cutout\"\n\n self.steps = steps\n self.gamma = gamma\n\n sampler = define_sampler(self.dataset_name, self.conditional_strategy)\n\n check_flag_1(self.tempering_type, self.pos_collected_numerator, self.conditional_strategy, self.diff_aug, self.ada,\n self.mixed_precision, self.gradient_penalty_for_dis, self.deep_regret_analysis_for_dis, self.cr, self.bcr, self.zcr)\n\n if self.conditional_strategy == 'ContraGAN':\n self.contrastive_criterion = Conditional_Contrastive_loss(self.default_device, self.batch_size, self.pos_collected_numerator)\n\n elif self.conditional_strategy == 'Proxy_NCA_GAN':\n if isinstance(self.dis_model, DataParallel):\n self.embedding_layer = self.dis_model.module.embedding\n else:\n 
self.embedding_layer = self.dis_model.embedding\n self.NCA_criterion = Proxy_NCA_loss(self.default_device, self.embedding_layer, self.num_classes, self.batch_size)\n\n elif self.conditional_strategy == 'NT_Xent_GAN':\n self.NT_Xent_criterion = NT_Xent_loss(self.default_device, self.batch_size)\n else:\n pass\n\n if self.mixed_precision:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.dataset_name in [\"imagenet\"]:\n self.num_eval = {'train':50000, 'valid':50000}\n elif self.dataset_name in [\"imagenet_less_0.25\"]:\n self.num_eval = {'train':50000, 'valid':50000}\n elif self.dataset_name in [\"imagenet_less\"]:\n self.num_eval = {'train':50000, 'valid':50000}\n elif self.dataset_name == \"tiny_imagenet\":\n self.num_eval = {'train':50000, 'valid':10000}\n elif self.dataset_name == \"cifar10\":\n self.num_eval = {'train':50000, 'test':10000}\n elif self.dataset_name == \"cifar10_less\":\n self.num_eval = {'train':len(self.train_dataset.data), 'valid':len(self.eval_dataset.data), 'test':len(self.eval_dataset.data)}\n elif self.dataset_name in [\"cifar100_less\"]:\n self.num_eval = {'train':len(self.train_dataset.data), 'valid':len(self.eval_dataset.data), 'test':len(self.eval_dataset.data)}\n elif self.dataset_name == \"custom\":\n num_train_images = len(self.train_dataset.data)\n num_eval_images = len(self.eval_dataset.data)\n self.num_eval = {'train':num_train_images, 'valid':num_eval_images}\n else:\n raise NotImplementedError\n\n\n ################################################################################################################################\n def train(self, current_step, total_step):\n self.dis_model.train()\n self.gen_model.train()\n if self.Gen_copy is not None:\n self.Gen_copy.train()\n\n self.logger.info('Start training....')\n step_count = current_step\n train_iter = iter(self.train_dataloader)\n\n if self.ada:\n self.ada_augment = torch.tensor([0.0, 0.0], device = self.default_device)\n if self.prev_ada_p is not None:\n self.ada_aug_p = self.prev_ada_p\n else:\n self.ada_aug_p = 0.0\n self.ada_aug_step = self.ada_target/self.ada_length\n else:\n self.ada_aug_p = 'No'\n\n while step_count <= total_step:\n # ================== TRAIN D ================== #\n toggle_grad(self.dis_model, True, freeze_layers=self.freeze_layers)\n toggle_grad(self.gen_model, False, freeze_layers=-1)\n t = set_temperature(self.conditional_strategy, self.tempering_type, self.start_temperature, self.end_temperature, step_count, self.tempering_step, total_step)\n for step_index in range(self.d_steps_per_iter):\n self.D_optimizer.zero_grad()\n for acml_index in range(self.accumulation_steps):\n try:\n real_images, real_labels = next(train_iter)\n except StopIteration:\n train_iter = iter(self.train_dataloader)\n real_images, real_labels = next(train_iter)\n\n real_images, real_labels = real_images.to(self.default_device), real_labels.to(self.default_device)\n with torch.cuda.amp.autocast() if self.mixed_precision else dummy_context_mgr() as mpc:\n if self.diff_aug:\n real_images = DiffAugment(real_images, policy=self.policy)\n if self.ada:\n real_images, _ = augment(real_images, self.ada_aug_p)\n\n if self.zcr:\n zs, fake_labels, zs_t = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n self.sigma_noise, self.default_device)\n else:\n zs, fake_labels = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n None, self.default_device)\n if self.latent_op:\n zs = latent_optimise(zs, fake_labels, self.gen_model, self.dis_model, 
self.conditional_strategy,\n                                                 self.latent_op_step, self.latent_op_rate, self.latent_op_alpha, self.latent_op_beta,\n                                                 False, self.default_device)\n\n                        fake_images = self.gen_model(zs, fake_labels)\n                        if self.diff_aug:\n                            fake_images = DiffAugment(fake_images, policy=self.policy)\n                        if self.ada:\n                            fake_images, _ = augment(fake_images, self.ada_aug_p)\n\n                        if self.conditional_strategy == \"ACGAN\":\n                            cls_out_real, dis_out_real = self.dis_model(real_images, real_labels)\n                            cls_out_fake, dis_out_fake = self.dis_model(fake_images, fake_labels)\n                        elif self.conditional_strategy == \"ProjGAN\" or self.conditional_strategy == \"no\":\n                            dis_out_real = self.dis_model(real_images, real_labels)\n                            dis_out_fake = self.dis_model(fake_images, fake_labels)\n                        elif self.conditional_strategy in [\"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n                            real_cls_mask = make_mask(real_labels, self.num_classes, self.default_device)\n                            cls_proxies_real, cls_embed_real, dis_out_real = self.dis_model(real_images, real_labels)\n                            cls_proxies_fake, cls_embed_fake, dis_out_fake = self.dis_model(fake_images, fake_labels)\n                        elif self.conditional_strategy == 'ProjGAN_adv':\n                            dis_out_real_prefc = self.dis_model(real_images, real_labels, fc=False)\n                            dis_out_fake_prefc = self.dis_model(fake_images, fake_labels, fc=False)\n\n                            loss_real = lambda x: torch.mean(F.relu(1. - x))\n                            loss_fake = lambda x: torch.mean(F.relu(1. + x))\n                            dis_out_real_prefc_adv = PGD(dis_out_real_prefc, real_labels, loss_real, self.dis_model, steps=self.steps, gamma=self.gamma)\n                            dis_out_fake_prefc_adv = PGD(dis_out_fake_prefc, fake_labels, loss_real, self.dis_model, steps=self.steps, gamma=self.gamma)\n\n                            fake_images = fake_images.detach()\n                            dis_out_real_prefc = self.dis_model(real_images, real_labels, fc=False, only_fc=False)\n                            dis_out_fake_prefc = self.dis_model(fake_images, fake_labels, fc=False, only_fc=False)\n\n                            dis_out_real = self.dis_model(dis_out_real_prefc, real_labels, only_fc=True, fc=True)\n                            dis_out_fake = self.dis_model(dis_out_fake_prefc, fake_labels, only_fc=True, fc=True)\n\n                            dis_out_real_adv = self.dis_model(dis_out_real_prefc_adv, real_labels, only_fc=True)\n                            dis_out_fake_adv = self.dis_model(dis_out_fake_prefc_adv, fake_labels, only_fc=True)\n\n                        else:\n                            raise NotImplementedError\n\n                        if self.conditional_strategy != 'ProjGAN_adv':\n                            dis_acml_loss = self.D_loss(dis_out_real, dis_out_fake)\n                        else:\n                            dis_acml_loss = (self.D_loss(dis_out_real, dis_out_fake) + self.D_loss(dis_out_real_adv, dis_out_fake_adv)) / 2\n\n                        if self.conditional_strategy == \"ACGAN\":\n                            dis_acml_loss += (self.ce_loss(cls_out_real, real_labels) + self.ce_loss(cls_out_fake, fake_labels))\n                        elif self.conditional_strategy == \"NT_Xent_GAN\":\n                            real_images_aug = CR_DiffAug(real_images)\n                            _, cls_embed_real_aug, dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n                            dis_acml_loss += self.contrastive_lambda*self.NT_Xent_criterion(cls_embed_real, cls_embed_real_aug, t)\n                        elif self.conditional_strategy == \"Proxy_NCA_GAN\":\n                            dis_acml_loss += self.contrastive_lambda*self.NCA_criterion(cls_embed_real, cls_proxies_real, real_labels)\n                        elif self.conditional_strategy == \"ContraGAN\":\n                            dis_acml_loss += self.contrastive_lambda*self.contrastive_criterion(cls_embed_real, cls_proxies_real,\n                                                                                                real_cls_mask, real_labels, t, self.margin)\n                        else:\n                            pass\n\n                        if self.cr:\n                            real_images_aug = CR_DiffAug(real_images)\n                            if self.conditional_strategy == \"ACGAN\":\n                                cls_out_real_aug, dis_out_real_aug = self.dis_model(real_images_aug, 
real_labels)\n cls_consistency_loss = self.l2_loss(cls_out_real, cls_out_real_aug)\n elif self.conditional_strategy == \"ProjGAN\" or self.conditional_strategy == \"no\":\n dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n elif self.conditional_strategy in [\"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n _, cls_embed_real_aug, dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n cls_consistency_loss = self.l2_loss(cls_embed_real, cls_embed_real_aug)\n elif self.conditional_strategy == \"ProjGAN_adv\":\n dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n else:\n raise NotImplementedError\n\n consistency_loss = self.l2_loss(dis_out_real, dis_out_real_aug)\n if self.conditional_strategy in [\"ACGAN\", \"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n consistency_loss += cls_consistency_loss\n dis_acml_loss += self.cr_lambda*consistency_loss\n\n if self.bcr:\n real_images_aug = CR_DiffAug(real_images)\n fake_images_aug = CR_DiffAug(fake_images)\n if self.conditional_strategy == \"ACGAN\":\n cls_out_real_aug, dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n cls_out_fake_aug, dis_out_fake_aug = self.dis_model(fake_images_aug, fake_labels)\n cls_bcr_real_loss = self.l2_loss(cls_out_real, cls_out_real_aug)\n cls_bcr_fake_loss = self.l2_loss(cls_out_fake, cls_out_fake_aug)\n elif self.conditional_strategy == \"ProjGAN\" or self.conditional_strategy == \"no\":\n dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n dis_out_fake_aug = self.dis_model(fake_images_aug, fake_labels)\n elif self.conditional_strategy in [\"ContraGAN\", \"Proxy_NCA_GAN\", \"NT_Xent_GAN\"]:\n cls_proxies_real_aug, cls_embed_real_aug, dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n cls_proxies_fake_aug, cls_embed_fake_aug, dis_out_fake_aug = self.dis_model(fake_images_aug, fake_labels)\n cls_bcr_real_loss = self.l2_loss(cls_embed_real, cls_embed_real_aug)\n cls_bcr_fake_loss = self.l2_loss(cls_embed_fake, cls_embed_fake_aug)\n elif self.conditional_strategy == \"ProjGAN_adv\":\n dis_out_real_aug = self.dis_model(real_images_aug, real_labels)\n dis_out_fake_aug = self.dis_model(fake_images_aug, fake_labels)\n else:\n raise NotImplementedError\n\n bcr_real_loss = self.l2_loss(dis_out_real, dis_out_real_aug)\n bcr_fake_loss = self.l2_loss(dis_out_fake, dis_out_fake_aug)\n if self.conditional_strategy in [\"ACGAN\", \"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n bcr_real_loss += cls_bcr_real_loss\n bcr_fake_loss += cls_bcr_fake_loss\n dis_acml_loss += self.real_lambda*bcr_real_loss + self.fake_lambda*bcr_fake_loss\n\n if self.zcr:\n fake_images_zaug = self.gen_model(zs_t, fake_labels)\n if self.conditional_strategy == \"ACGAN\":\n cls_out_fake_zaug, dis_out_fake_zaug = self.dis_model(fake_images_zaug, fake_labels)\n cls_zcr_dis_loss = self.l2_loss(cls_out_fake, cls_out_fake_zaug)\n elif self.conditional_strategy == \"ProjGAN\" or self.conditional_strategy == \"no\":\n dis_out_fake_zaug = self.dis_model(fake_images_zaug, fake_labels)\n elif self.conditional_strategy in [\"ContraGAN\", \"Proxy_NCA_GAN\", \"NT_Xent_GAN\"]:\n cls_proxies_fake_zaug, cls_embed_fake_zaug, dis_out_fake_zaug = self.dis_model(fake_images_zaug, fake_labels)\n cls_zcr_dis_loss = self.l2_loss(cls_embed_fake, cls_embed_fake_zaug)\n elif self.conditional_strategy == \"ProjGAN_adv\":\n dis_out_fake_zaug = self.dis_model(fake_images_zaug, fake_labels)\n else:\n raise NotImplementedError\n\n zcr_dis_loss = self.l2_loss(dis_out_fake, dis_out_fake_zaug)\n if 
self.conditional_strategy in [\"ACGAN\", \"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n zcr_dis_loss += cls_zcr_dis_loss\n dis_acml_loss += self.dis_lambda*zcr_dis_loss\n\n if self.gradient_penalty_for_dis:\n dis_acml_loss += self.gradient_penalty_lambda*calc_derv4gp(self.dis_model, self.conditional_strategy, real_images,\n fake_images, real_labels, self.default_device)\n if self.deep_regret_analysis_for_dis:\n dis_acml_loss += self.regret_penalty_lambda*calc_derv4dra(self.dis_model, self.conditional_strategy, real_images,\n real_labels, self.default_device)\n if self.ada:\n ada_aug_data = torch.tensor((torch.sign(dis_out_real).sum().item(), dis_out_real.shape[0]), device = self.default_device)\n self.ada_augment += ada_aug_data\n if self.ada_augment[1] > (self.batch_size*4 - 1):\n authen_out_signs, num_outputs = self.ada_augment.tolist()\n r_t_stat = authen_out_signs/num_outputs\n sign = 1 if r_t_stat > self.ada_target else -1\n self.ada_aug_p += sign*self.ada_aug_step*num_outputs\n self.ada_aug_p = min(1.0, max(0.0, self.ada_aug_p))\n self.ada_augment.mul_(0.0)\n\n dis_acml_loss = dis_acml_loss/self.accumulation_steps\n\n if self.mixed_precision:\n self.scaler.scale(dis_acml_loss).backward()\n else:\n dis_acml_loss.backward()\n\n if self.mixed_precision:\n self.scaler.step(self.D_optimizer)\n self.scaler.update()\n else:\n self.D_optimizer.step()\n\n if self.weight_clipping_for_dis:\n for p in self.dis_model.parameters():\n p.data.clamp_(-self.weight_clipping_bound, self.weight_clipping_bound)\n\n if step_count % self.print_every == 0 and step_count !=0 and self.logger:\n if self.d_spectral_norm:\n dis_sigmas = calculate_all_sn(self.dis_model)\n self.writer.add_scalars('SN_of_dis', dis_sigmas, step_count)\n\n # ================== TRAIN G ================== #\n toggle_grad(self.dis_model, False, freeze_layers=-1)\n toggle_grad(self.gen_model, True, freeze_layers=-1)\n for step_index in range(self.g_steps_per_iter):\n self.G_optimizer.zero_grad()\n for acml_step in range(self.accumulation_steps):\n with torch.cuda.amp.autocast() if self.mixed_precision else dummy_context_mgr() as mpc:\n if self.zcr:\n zs, fake_labels, zs_t = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n self.sigma_noise, self.default_device)\n else:\n zs, fake_labels = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n None, self.default_device)\n if self.latent_op:\n zs, transport_cost = latent_optimise(zs, fake_labels, self.gen_model, self.dis_model, self.conditional_strategy,\n self.latent_op_step, self.latent_op_rate, self.latent_op_alpha,\n self.latent_op_beta, True, self.default_device)\n if not self.conditional_strategy == 'ProjGAN_adv':\n fake_images = self.gen_model(zs, fake_labels)\n else:\n gen_out_prefc, labels_prefc = self.gen_model(zs, fake_labels, only_l1=True)\n \n loss_fake = lambda x: -torch.mean(x)\n gen_out_adv = PGD_G(gen_out_prefc, labels_prefc, fake_labels, loss_fake, self.gen_model, self.dis_model, steps=self.steps, gamma=self.gamma)\n \n fake_images = self.gen_model(gen_out_prefc, labels_prefc, l1=False)\n fake_images_adv = self.gen_model(gen_out_adv, labels_prefc, l1=False)\n\n\n\n if self.diff_aug:\n fake_images = DiffAugment(fake_images, policy=self.policy)\n if self.ada:\n fake_images, _ = augment(fake_images, self.ada_aug_p)\n\n if self.conditional_strategy == \"ACGAN\":\n cls_out_fake, dis_out_fake = self.dis_model(fake_images, fake_labels)\n elif self.conditional_strategy == \"ProjGAN\" or self.conditional_strategy == 
\"no\":\n dis_out_fake = self.dis_model(fake_images, fake_labels)\n elif self.conditional_strategy in [\"NT_Xent_GAN\", \"Proxy_NCA_GAN\", \"ContraGAN\"]:\n fake_cls_mask = make_mask(fake_labels, self.num_classes, self.default_device)\n cls_proxies_fake, cls_embed_fake, dis_out_fake = self.dis_model(fake_images, fake_labels)\n elif self.conditional_strategy == 'ProjGAN_adv':\n dis_out_fake = self.dis_model(fake_images, fake_labels)\n dis_out_adv = self.dis_model(fake_images_adv, fake_labels)\n\n else:\n raise NotImplementedError\n\n gen_acml_loss = self.G_loss(dis_out_fake)\n if self.latent_op:\n gen_acml_loss += transport_cost*self.latent_norm_reg_weight\n\n if self.zcr:\n fake_images_zaug = self.gen_model(zs_t, fake_labels)\n zcr_gen_loss = -1 * self.l2_loss(fake_images, fake_images_zaug)\n gen_acml_loss += self.gen_lambda*zcr_gen_loss\n\n if self.conditional_strategy == \"ACGAN\":\n gen_acml_loss += self.ce_loss(cls_out_fake, fake_labels)\n elif self.conditional_strategy == \"ContraGAN\":\n gen_acml_loss += self.contrastive_lambda*self.contrastive_criterion(cls_embed_fake, cls_proxies_fake, fake_cls_mask, fake_labels, t, self.margin)\n elif self.conditional_strategy == \"Proxy_NCA_GAN\":\n gen_acml_loss += self.contrastive_lambda*self.NCA_criterion(cls_embed_fake, cls_proxies_fake, fake_labels)\n elif self.conditional_strategy == \"NT_Xent_GAN\":\n fake_images_aug = CR_DiffAug(fake_images)\n _, cls_embed_fake_aug, dis_out_fake_aug = self.dis_model(fake_images_aug, fake_labels)\n gen_acml_loss += self.contrastive_lambda*self.NT_Xent_criterion(cls_embed_fake, cls_embed_fake_aug, t)\n elif self.conditional_strategy == 'ProjGAN_adv':\n gen_acml_loss = (self.G_loss(dis_out_fake) + self.G_loss(dis_out_adv)) / 2\n else:\n pass\n\n gen_acml_loss = gen_acml_loss/self.accumulation_steps\n\n if self.mixed_precision:\n self.scaler.scale(gen_acml_loss).backward()\n else:\n gen_acml_loss.backward()\n\n if self.mixed_precision:\n self.scaler.step(self.G_optimizer)\n self.scaler.update()\n else:\n self.G_optimizer.step()\n\n # if ema is True: we update parameters of the Gen_copy in adaptive way.\n if self.ema:\n self.Gen_ema.update(step_count)\n\n step_count += 1\n\n if step_count % self.print_every == 0 and self.logger:\n log_message = LOG_FORMAT.format(step=step_count,\n progress=step_count/total_step,\n elapsed=elapsed_time(self.start_time),\n temperature=t,\n ada_p=self.ada_aug_p,\n dis_loss=dis_acml_loss.item(),\n gen_loss=gen_acml_loss.item(),\n )\n self.logger.info(log_message)\n\n if self.g_spectral_norm:\n gen_sigmas = calculate_all_sn(self.gen_model)\n self.writer.add_scalars('SN_of_gen', gen_sigmas, step_count)\n\n self.writer.add_scalars('Losses', {'discriminator': dis_acml_loss.item(),\n 'generator': gen_acml_loss.item()}, step_count)\n if self.ada:\n self.writer.add_scalar('ada_p', self.ada_aug_p, step_count)\n\n if step_count % self.save_every == 0 or step_count == total_step:\n if self.evaluate:\n is_best = self.evaluation(step_count, False, \"N/A\")\n self.save(step_count, is_best)\n else:\n self.save(step_count, False)\n return step_count-1\n ################################################################################################################################\n\n\n ################################################################################################################################\n def save(self, step, is_best):\n when = \"best\" if is_best is True else \"current\"\n self.dis_model.eval()\n self.gen_model.eval()\n if self.Gen_copy is not None:\n 
self.Gen_copy.eval()\n\n if isinstance(self.gen_model, DataParallel):\n gen = self.gen_model.module\n dis = self.dis_model.module\n if self.Gen_copy is not None:\n gen_copy = self.Gen_copy.module\n else:\n gen, dis = self.gen_model, self.dis_model\n if self.Gen_copy is not None:\n gen_copy = self.Gen_copy\n\n g_states = {'seed': self.train_config['seed'], 'run_name': self.run_name, 'step': step, 'best_step': self.best_step,\n 'state_dict': gen.state_dict(), 'optimizer': self.G_optimizer.state_dict(), 'ada_p': self.ada_aug_p}\n\n d_states = {'seed': self.train_config['seed'], 'run_name': self.run_name, 'step': step, 'best_step': self.best_step,\n 'state_dict': dis.state_dict(), 'optimizer': self.D_optimizer.state_dict(), 'ada_p': self.ada_aug_p,\n 'best_fid': self.best_fid, 'best_fid_checkpoint_path': self.checkpoint_dir}\n\n if len(glob.glob(join(self.checkpoint_dir,\"model=G-{when}-weights-step*.pth\".format(when=when)))) >= 1:\n find_and_remove(glob.glob(join(self.checkpoint_dir,\"model=G-{when}-weights-step*.pth\".format(when=when)))[0])\n find_and_remove(glob.glob(join(self.checkpoint_dir,\"model=D-{when}-weights-step*.pth\".format(when=when)))[0])\n\n g_checkpoint_output_path = join(self.checkpoint_dir, \"model=G-{when}-weights-step={step}.pth\".format(when=when, step=str(step)))\n d_checkpoint_output_path = join(self.checkpoint_dir, \"model=D-{when}-weights-step={step}.pth\".format(when=when, step=str(step)))\n\n if when == \"best\":\n if len(glob.glob(join(self.checkpoint_dir,\"model=G-current-weights-step*.pth\".format(when=when)))) >= 1:\n find_and_remove(glob.glob(join(self.checkpoint_dir,\"model=G-current-weights-step*.pth\".format(when=when)))[0])\n find_and_remove(glob.glob(join(self.checkpoint_dir,\"model=D-current-weights-step*.pth\".format(when=when)))[0])\n\n g_checkpoint_output_path_ = join(self.checkpoint_dir, \"model=G-current-weights-step={step}.pth\".format(when=when, step=str(step)))\n d_checkpoint_output_path_ = join(self.checkpoint_dir, \"model=D-current-weights-step={step}.pth\".format(when=when, step=str(step)))\n\n torch.save(g_states, g_checkpoint_output_path_)\n torch.save(d_states, d_checkpoint_output_path_)\n\n torch.save(g_states, g_checkpoint_output_path)\n torch.save(d_states, d_checkpoint_output_path)\n\n if self.Gen_copy is not None:\n g_ema_states = {'state_dict': gen_copy.state_dict()}\n if len(glob.glob(join(self.checkpoint_dir, \"model=G_ema-{when}-weights-step*.pth\".format(when=when)))) >= 1:\n find_and_remove(glob.glob(join(self.checkpoint_dir, \"model=G_ema-{when}-weights-step*.pth\".format(when=when)))[0])\n\n g_ema_checkpoint_output_path = join(self.checkpoint_dir, \"model=G_ema-{when}-weights-step={step}.pth\".format(when=when, step=str(step)))\n\n if when == \"best\":\n if len(glob.glob(join(self.checkpoint_dir,\"model=G_ema-current-weights-step*.pth\".format(when=when)))) >= 1:\n find_and_remove(glob.glob(join(self.checkpoint_dir,\"model=G_ema-current-weights-step*.pth\".format(when=when)))[0])\n\n g_ema_checkpoint_output_path_ = join(self.checkpoint_dir, \"model=G_ema-current-weights-step={step}.pth\".format(when=when, step=str(step)))\n\n torch.save(g_ema_states, g_ema_checkpoint_output_path_)\n\n torch.save(g_ema_states, g_ema_checkpoint_output_path)\n\n if self.logger:\n self.logger.info(\"Saved model to {}\".format(self.checkpoint_dir))\n\n self.dis_model.train()\n self.gen_model.train()\n if self.Gen_copy is not None:\n self.Gen_copy.train()\n 
################################################################################################################################\n\n\n ################################################################################################################################\n def evaluation(self, step, standing_statistics, standing_step):\n with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n self.logger.info(\"Start Evaluation ({step} Step): {run_name}\".format(step=step, run_name=self.run_name))\n is_best = False\n num_split, num_run4PR, num_cluster4PR, beta4PR = 1, 10, 20, 8\n\n self.dis_model.eval()\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n\n fid_score, self.m1, self.s1 = calculate_fid_score(self.eval_dataloader, generator, self.dis_model, self.inception_model, self.num_eval[self.eval_type],\n self.truncated_factor, self.prior, self.latent_op, self.latent_op_step4eval, self.latent_op_alpha,\n self.latent_op_beta, self.default_device, self.mu, self.sigma, self.run_name)\n\n kl_score, kl_std = calculate_incep_score(self.eval_dataloader, generator, self.dis_model, self.inception_model, self.num_eval[self.eval_type],\n self.truncated_factor, self.prior, self.latent_op, self.latent_op_step4eval, self.latent_op_alpha,\n self.latent_op_beta, num_split, self.default_device)\n\n precision, recall, f_beta, f_beta_inv = calculate_f_beta_score(self.eval_dataloader, generator, self.dis_model, self.inception_model, self.num_eval[self.eval_type],\n num_run4PR, num_cluster4PR, beta4PR, self.truncated_factor, self.prior, self.latent_op,\n self.latent_op_step4eval, self.latent_op_alpha, self.latent_op_beta, self.default_device)\n PR_Curve = plot_pr_curve(precision, recall, self.run_name, self.logger)\n '''\n if self.D_loss.__name__ != \"loss_wgan_dis\":\n real_train_acc, fake_acc = calculate_accuracy(self.train_dataloader, generator, self.dis_model, self.D_loss, self.num_eval[self.eval_type],\n self.truncated_factor, self.prior, self.latent_op, self.latent_op_step, self.latent_op_alpha,\n self.latent_op_beta, self.default_device, cr=self.cr, eval_generated_sample=True)\n\n if self.eval_type == 'train':\n acc_dict = {'real_train': real_train_acc, 'fake': fake_acc}\n else:\n real_eval_acc = calculate_accuracy(self.eval_dataloader, generator, self.dis_model, self.D_loss, self.num_eval[self.eval_type],\n self.truncated_factor, self.prior, self.latent_op, self.latent_op_step, self.latent_op_alpha,\n self. 
latent_op_beta, self.default_device, cr=self.cr, eval_generated_sample=False)\n acc_dict = {'real_train': real_train_acc, 'real_valid': real_eval_acc, 'fake': fake_acc}\n\n self.writer.add_scalars('{}/Accuracy'.format(self.prune_round), acc_dict, step)\n '''\n if self.best_fid is None:\n self.best_fid, self.best_step, is_best, f_beta_best, f_beta_inv_best = fid_score, step, True, f_beta, f_beta_inv\n else:\n if fid_score <= self.best_fid:\n self.best_fid, self.best_step, is_best, f_beta_best, f_beta_inv_best = fid_score, step, True, f_beta, f_beta_inv\n\n self.writer.add_scalars('FID score', {'using {type} moments'.format(type=self.eval_type):fid_score}, step)\n self.writer.add_scalars('F_beta score', {'{num} generated images'.format(num=str(self.num_eval[self.eval_type])):f_beta}, step)\n self.writer.add_scalars('F_beta_inv score', {'{num} generated images'.format(num=str(self.num_eval[self.eval_type])):f_beta_inv}, step)\n self.writer.add_scalars('IS score', {'{num} generated images'.format(num=str(self.num_eval[self.eval_type])):kl_score}, step)\n self.writer.add_figure('PR_Curve', PR_Curve, global_step=step)\n self.logger.info('F_{beta} score (Step: {step}, Using {type} images): {F_beta}'.format(beta=beta4PR, step=step, type=self.eval_type, F_beta=f_beta))\n self.logger.info('F_1/{beta} score (Step: {step}, Using {type} images): {F_beta_inv}'.format(beta=beta4PR, step=step, type=self.eval_type, F_beta_inv=f_beta_inv))\n self.logger.info('FID score (Step: {step}, Using {type} moments): {FID}'.format(step=step, type=self.eval_type, FID=fid_score))\n self.logger.info('Inception score (Step: {step}, {num} generated images): {IS}'.format(step=step, num=str(self.num_eval[self.eval_type]), IS=kl_score))\n if self.train:\n self.logger.info('Best FID score (Step: {step}, Using {type} moments): {FID}'.format(step=self.best_step, type=self.eval_type, FID=self.best_fid))\n\n self.dis_model.train()\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=True)\n\n return is_best\n ################################################################################################################################\n\n\n ################################################################################################################################\n def save_images(self, is_generate, standing_statistics, standing_step, png=True, npz=True):\n with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n self.dis_model.eval()\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n\n if png:\n save_images_png(self.run_name, self.eval_dataloader, self.num_eval[self.eval_type], self.num_classes, generator,\n self.dis_model, is_generate, self.truncated_factor, self.prior, self.latent_op, self.latent_op_step,\n self.latent_op_alpha, self.latent_op_beta, self.default_device)\n if npz:\n save_images_npz(self.run_name, self.eval_dataloader, self.num_eval[self.eval_type], self.num_classes, generator,\n self.dis_model, is_generate, self.truncated_factor, self.prior, self.latent_op, self.latent_op_step,\n self.latent_op_alpha, self.latent_op_beta, self.default_device)\n ################################################################################################################################\n\n\n 
################################################################################################################################\n def run_image_visualization(self, nrow, ncol, standing_statistics, standing_step):\n self.logger.info('Start visualizing images....')\n with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n\n sampler = \"default\" if self.conditional_strategy == \"no\" else \"class_order_some\"\n if self.zcr:\n zs, fake_labels, zs_t = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n self.sigma_noise, self.default_device, sampler=sampler)\n else:\n zs, fake_labels = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n None, self.default_device, sampler=sampler)\n\n if self.latent_op:\n zs = latent_optimise(zs, fake_labels, self.gen_model, self.dis_model, self.conditional_strategy,\n self.latent_op_step, self.latent_op_rate, self.latent_op_alpha, self.latent_op_beta,\n False, self.default_device)\n\n generated_images = generator(zs, fake_labels, evaluation=True)\n\n plot_img_canvas((generated_images.detach().cpu()+1)/2, \"./figures/{run_name}/generated_canvas.png\".\\\n format(run_name=self.run_name), self.logger, ncol)\n\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=True)\n ################################################################################################################################\n\n\n ################################################################################################################################\n def run_linear_interpolation(self, nrow, ncol, fix_z, fix_y, standing_statistics, standing_step):\n self.logger.info('Start linear interpolation analysis....')\n with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n shared = generator.module.shared if isinstance(generator, DataParallel) else generator.shared\n assert int(fix_z)*int(fix_y) != 1, \"unable to switch fix_z and fix_y on together!\"\n\n if fix_z:\n zs = torch.randn(nrow, 1, self.z_dim, device=self.default_device)\n zs = zs.repeat(1, ncol, 1).view(-1, self.z_dim)\n name = \"fix_z\"\n else:\n zs = interp(torch.randn(nrow, 1, self.z_dim, device=self.default_device),\n torch.randn(nrow, 1, self.z_dim, device=self.default_device),\n ncol - 2).view(-1, self.z_dim)\n\n if fix_y:\n ys = sample_1hot(nrow, self.num_classes, device=self.default_device)\n ys = shared(ys).view(nrow, 1, -1)\n ys = ys.repeat(1, ncol, 1).view(nrow * (ncol), -1)\n name = \"fix_y\"\n else:\n ys = interp(shared(sample_1hot(nrow, self.num_classes)).view(nrow, 1, -1),\n shared(sample_1hot(nrow, self.num_classes)).view(nrow, 1, -1),\n ncol-2).view(nrow * (ncol), -1)\n\n interpolated_images = generator(zs, None, shared_label=ys, evaluation=True)\n\n plot_img_canvas((interpolated_images.detach().cpu()+1)/2, \"./figures/{run_name}/Interpolated_images_{fix_flag}.png\".\\\n format(run_name=self.run_name, fix_flag=name), self.logger, ncol)\n\n generator = 
change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=True)\n ################################################################################################################################\n\n\n ################################################################################################################################\n def run_nearest_neighbor(self, nrow, ncol, standing_statistics, standing_step):\n self.logger.info('Start nearest neighbor analysis....')\n with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n\n resnet50_model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=True)\n resnet50_conv = nn.Sequential(*list(resnet50_model.children())[:-1]).to(self.default_device)\n if self.n_gpus > 1:\n resnet50_conv = DataParallel(resnet50_conv, output_device=self.default_device)\n resnet50_conv.eval()\n\n for c in tqdm(range(self.num_classes)):\n fake_images, fake_labels = generate_images_for_KNN(self.batch_size, c, generator, self.dis_model, self.truncated_factor, self.prior, self.latent_op,\n self.latent_op_step, self.latent_op_alpha, self.latent_op_beta, self.default_device)\n fake_image = torch.unsqueeze(fake_images[0], dim=0)\n fake_anchor_embedding = torch.squeeze(resnet50_conv((fake_image+1)/2))\n\n num_samples, target_sampler = target_class_sampler(self.train_dataset, c)\n train_dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False, sampler=target_sampler,\n num_workers=self.train_config['num_workers'], pin_memory=True)\n train_iter = iter(train_dataloader)\n for batch_idx in range(num_samples//self.batch_size):\n real_images, real_labels = next(train_iter)\n real_images = real_images.to(self.default_device)\n real_embeddings = torch.squeeze(resnet50_conv((real_images+1)/2))\n if batch_idx == 0:\n distances = torch.square(real_embeddings - fake_anchor_embedding).mean(dim=1).detach().cpu().numpy()\n holder = real_images.detach().cpu().numpy()\n else:\n distances = np.concatenate([distances, torch.square(real_embeddings - fake_anchor_embedding).mean(dim=1).detach().cpu().numpy()], axis=0)\n holder = np.concatenate([holder, real_images.detach().cpu().numpy()], axis=0)\n\n nearest_indices = (-distances).argsort()[-(ncol-1):][::-1]\n if c % nrow == 0:\n canvas = np.concatenate([fake_image.detach().cpu().numpy(), holder[nearest_indices]], axis=0)\n elif c % nrow == nrow-1:\n row_images = np.concatenate([fake_image.detach().cpu().numpy(), holder[nearest_indices]], axis=0)\n canvas = np.concatenate((canvas, row_images), axis=0)\n plot_img_canvas((torch.from_numpy(canvas)+1)/2, \"./figures/{run_name}/Fake_anchor_{ncol}NN_{cls}.png\".\\\n format(run_name=self.run_name,ncol=ncol, cls=c), self.logger, ncol)\n else:\n row_images = np.concatenate([fake_image.detach().cpu().numpy(), holder[nearest_indices]], axis=0)\n canvas = np.concatenate((canvas, row_images), axis=0)\n\n generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n self.batch_size, self.z_dim, self.num_classes, self.default_device, training=True)\n 
################################################################################################################################\n\n\n    ################################################################################################################################\n    def run_frequency_analysis(self, num_images, standing_statistics, standing_step):\n        self.logger.info('Start frequency analysis....')\n        with torch.no_grad() if self.latent_op is False else dummy_context_mgr() as mpc:\n            generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n                                              self.batch_size, self.z_dim, self.num_classes, self.default_device, training=False)\n\n            train_iter = iter(self.train_dataloader)\n            num_batches = num_images//self.batch_size\n            for i in range(num_batches):\n                if self.zcr:\n                    zs, fake_labels, zs_t = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n                                                           self.sigma_noise, self.default_device)\n                else:\n                    zs, fake_labels = sample_latents(self.prior, self.batch_size, self.z_dim, 1, self.num_classes,\n                                                     None, self.default_device)\n\n                if self.latent_op:\n                    zs = latent_optimise(zs, fake_labels, self.gen_model, self.dis_model, self.conditional_strategy,\n                                         self.latent_op_step, self.latent_op_rate, self.latent_op_alpha, self.latent_op_beta,\n                                         False, self.default_device)\n\n                real_images, real_labels = next(train_iter)\n                fake_images = generator(zs, fake_labels, evaluation=True).detach().cpu().numpy()\n\n                real_images = np.asarray((real_images + 1)*127.5, np.uint8)\n                fake_images = np.asarray((fake_images + 1)*127.5, np.uint8)\n\n                if i == 0:\n                    real_array = real_images\n                    fake_array = fake_images\n                else:\n                    real_array = np.concatenate([real_array, real_images], axis = 0)\n                    fake_array = np.concatenate([fake_array, fake_images], axis = 0)\n\n            N, C, H, W = np.shape(real_array)\n            real_r, real_g, real_b = real_array[:,0,:,:], real_array[:,1,:,:], real_array[:,2,:,:]\n            real_gray = 0.2989 * real_r + 0.5870 * real_g + 0.1140 * real_b\n            fake_r, fake_g, fake_b = fake_array[:,0,:,:], fake_array[:,1,:,:], fake_array[:,2,:,:]\n            fake_gray = 0.2989 * fake_r + 0.5870 * fake_g + 0.1140 * fake_b\n            for j in tqdm(range(N)):\n                real_gray_f = np.fft.fft2(real_gray[j] - ndimage.median_filter(real_gray[j], size=H//8))\n                fake_gray_f = np.fft.fft2(fake_gray[j] - ndimage.median_filter(fake_gray[j], size=H//8))\n\n                real_gray_f_shifted = np.fft.fftshift(real_gray_f)\n                fake_gray_f_shifted = np.fft.fftshift(fake_gray_f)\n\n                if j == 0:\n                    real_gray_spectrum = 20*np.log(np.abs(real_gray_f_shifted))/N\n                    fake_gray_spectrum = 20*np.log(np.abs(fake_gray_f_shifted))/N\n                else:\n                    real_gray_spectrum += 20*np.log(np.abs(real_gray_f_shifted))/N\n                    fake_gray_spectrum += 20*np.log(np.abs(fake_gray_f_shifted))/N\n\n            plot_spectrum_image(real_gray_spectrum, fake_gray_spectrum, self.run_name, self.logger)\n\n            generator = change_generator_mode(self.gen_model, self.Gen_copy, standing_statistics, standing_step, self.prior,\n                                              self.batch_size, self.z_dim, self.num_classes, self.default_device, training=True)\n    ################################################################################################################################\n\n\n\ndef PGD(x, label, loss, model=None, steps=1, gamma=0.1, eps=(1/255), randinit=False, clip=False):\n\n    # Iteratively perturb a copy of x along the sign of the loss gradient.\n    x_adv = x.clone()\n    if randinit:\n        # adv noise (-eps, eps)\n        x_adv += (2.0 * torch.rand(x_adv.shape).cuda() - 1.0) * eps\n    x_adv = x_adv.cuda()\n    x = x.cuda()\n\n    for t in range(steps):\n        out = model(x_adv, label, only_fc=True)\n        loss_adv0 = 
-loss(out)\n grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]\n x_adv.data.add_(gamma * torch.sign(grad0.data))\n\n if clip:\n linfball_proj(x, eps, x_adv, in_place=True)\n\n return x_adv\n\ndef PGD_G(x, gen_labels, label, loss, gen_model, dis_model, steps=1, gamma=0.1, eps=(1/255), randinit=False, clip=False):\n \n # Compute loss\n x_adv = x.clone()\n x_adv = x_adv.cuda()\n x = x.cuda()\n\n for t in range(steps):\n out = gen_model(x_adv, gen_labels, l1=False)\n out = dis_model(out, label)\n loss_adv0 = -loss(out)\n grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]\n x_adv.data.add_(gamma * torch.sign(grad0.data))\n\n if clip:\n linfball_proj(x, eps, x_adv, in_place=True)\n\n return x_adv"
] | [
[
"torch.utils.data.DataLoader",
"numpy.fft.fftshift",
"torch.rand",
"torch.no_grad",
"numpy.asarray",
"torch.cuda.amp.GradScaler",
"torch.randn",
"torch.save",
"numpy.abs",
"torch.sign",
"torch.from_numpy",
"torch.nn.DataParallel",
"torch.mean",
"torch.unsqueeze",
"torch.tensor",
"torch.hub.load",
"torch.autograd.grad",
"scipy.ndimage.median_filter",
"torch.nn.MSELoss",
"torch.square",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.relu",
"torch.cuda.amp.autocast",
"numpy.shape",
"numpy.concatenate"
]
] |
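The `ProjGAN_adv` branches in the `train_eval.py` entry above feed pre-classifier discriminator features (and intermediate generator activations) through the `PGD`/`PGD_G` routines, whose core update is `x_adv += gamma * sign(grad(-loss))`, repeated `steps` times. A minimal, self-contained mirror of that update — using a hypothetical linear head on CPU in place of the real discriminator's `only_fc` path and its `.cuda()` calls — could look like:

```python
import torch
import torch.nn as nn

def pgd_perturb(x, model, loss_fn, steps=1, gamma=0.1):
    # Same update rule as PGD/PGD_G above: take `steps` signed-gradient
    # steps on the negated loss, starting from a detached copy of x.
    x_adv = x.clone().detach().requires_grad_(True)
    for _ in range(steps):
        loss_adv = -loss_fn(model(x_adv))
        grad, = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)
        x_adv.data.add_(gamma * torch.sign(grad))
    return x_adv.detach()

# Stand-in for the discriminator's final fc layer (hypothetical sizes).
head = nn.Linear(8, 1)
feats = torch.randn(4, 8)
hinge_real = lambda out: torch.mean(torch.relu(1. - out))  # loss_real above
adv_feats = pgd_perturb(feats, head, hinge_real, steps=3, gamma=0.1)
print((adv_feats - feats).abs().max())  # each entry moves at most steps*gamma
```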
KuangenZhang/pytorch_geometric | [
"0bfc79a5eaccfcd16a82395e8578a90c5e44759f"
] | [
"benchmark/points/edge_cnn_ke.py"
] | [
"import argparse\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Sequential as Seq, Linear as Lin, ReLU, LeakyReLU\nfrom torch_geometric.nn import DynamicEdgeConv, global_max_pool\n\nfrom datasets import get_dataset\nfrom train_eval import run\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=200)\nparser.add_argument('--batch_size', type=int, default=24)\nparser.add_argument('--lr', type=float, default=0.001)\nparser.add_argument('--lr_decay_factor', type=float, default=0.5)\nparser.add_argument('--lr_decay_step_size', type=int, default=50)\nparser.add_argument('--weight_decay', type=float, default=0)\nargs = parser.parse_args()\n\n\nclass Net(torch.nn.Module):\n def __init__(self, num_classes):\n super(Net, self).__init__()\n\n nn = Seq(Lin(6, 64), LeakyReLU(negative_slope=0.2),\n Lin(64, 64), LeakyReLU(negative_slope=0.2),\n Lin(64, 64), LeakyReLU(negative_slope=0.2))\n self.conv1 = DynamicEdgeConv(nn, k=20, aggr='max')\n\n nn = Seq(\n Lin(128, 128), LeakyReLU(negative_slope=0.2),\n Lin(128, 128), LeakyReLU(negative_slope=0.2),\n Lin(128, 256), LeakyReLU(negative_slope=0.2))\n self.conv2 = DynamicEdgeConv(nn, k=20, aggr='max')\n\n self.lin0 = Lin(256, 512)\n\n self.lin1 = Lin(512, 256)\n self.lin2 = Lin(256, 256)\n self.lin3 = Lin(256, num_classes)\n\n def forward(self, pos, batch):\n x = self.conv1(pos, batch)\n x = self.conv2(x, batch)\n\n x = F.relu(self.lin0(x))\n\n x = global_max_pool(x, batch)\n\n x = F.relu(self.lin1(x))\n x = F.relu(self.lin2(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin3(x)\n return F.log_softmax(x, dim=-1)\n\n\ntrain_dataset, test_dataset = get_dataset(num_points=1024)\nmodel = Net(train_dataset.num_classes)\nrun(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr,\n args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay)\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.dropout",
"torch.nn.LeakyReLU"
]
] |
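The `Net` in the `edge_cnn_ke.py` entry above is a DGCNN-style point-cloud classifier: two `DynamicEdgeConv` blocks rebuild a k-NN graph over the current features, `global_max_pool` collapses each cloud to a single vector, and an MLP head emits `log_softmax` scores. A smoke test of the forward pass — assuming `torch_geometric` (plus `torch_cluster` for the k-NN search) is installed and the `Net` class from the file is in scope — might look like:

```python
import torch

# Hypothetical smoke test for the Net defined in edge_cnn_ke.py above.
model = Net(num_classes=40)                  # e.g. a ModelNet40-sized output
pos = torch.rand(1024, 3)                    # one cloud of 1024 xyz points
batch = torch.zeros(1024, dtype=torch.long)  # every point belongs to graph 0
logp = model(pos, batch)                     # per-class log-probabilities
print(logp.shape)                            # torch.Size([1, 40])
```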
noahl/tensorflow | [
"b95d8cce7323d328565378e0d60d72603393f87d",
"b95d8cce7323d328565378e0d60d72603393f87d"
] | [
"tensorflow/python/ops/standard_ops.py",
"tensorflow/contrib/lite/testing/generate_examples.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=unused-import\n\"\"\"Import names of Tensor Flow standard Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys as _sys\n\n# pylint: disable=g-bad-import-order\n# Imports the following modules so that @RegisterGradient get executed.\nfrom tensorflow.python.ops import array_grad\nfrom tensorflow.python.ops import cudnn_rnn_grad\nfrom tensorflow.python.ops import data_flow_grad\nfrom tensorflow.python.ops import manip_grad\nfrom tensorflow.python.ops import math_grad\nfrom tensorflow.python.ops import sparse_grad\nfrom tensorflow.python.ops import spectral_grad\nfrom tensorflow.python.ops import state_grad\nfrom tensorflow.python.ops import tensor_array_grad\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.array_ops import *\nfrom tensorflow.python.ops.check_ops import *\nfrom tensorflow.python.ops.clip_ops import *\nfrom tensorflow.python.ops.special_math_ops import *\n# TODO(vrv): Switch to import * once we're okay with exposing the module.\nfrom tensorflow.python.ops.confusion_matrix import confusion_matrix\nfrom tensorflow.python.ops.control_flow_ops import Assert\nfrom tensorflow.python.ops.control_flow_ops import case\nfrom tensorflow.python.ops.control_flow_ops import cond\nfrom tensorflow.python.ops.control_flow_ops import group\nfrom tensorflow.python.ops.control_flow_ops import no_op\nfrom tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin\n# pylint: enable=redefined-builtin\nfrom tensorflow.python.ops.control_flow_ops import while_loop\nfrom tensorflow.python.ops.data_flow_ops import *\nfrom tensorflow.python.ops.functional_ops import *\nfrom tensorflow.python.ops.gradients import *\nfrom tensorflow.python.ops.histogram_ops import *\nfrom tensorflow.python.ops.init_ops import *\nfrom tensorflow.python.ops.io_ops import *\nfrom tensorflow.python.ops.linalg_ops import *\nfrom tensorflow.python.ops.logging_ops import Print\nfrom tensorflow.python.ops.logging_ops import get_summary_op\nfrom tensorflow.python.ops.logging_ops import timestamp\nfrom tensorflow.python.ops.lookup_ops import initialize_all_tables\nfrom tensorflow.python.ops.lookup_ops import tables_initializer\nfrom tensorflow.python.ops.manip_ops import *\nfrom tensorflow.python.ops.math_ops import *\nfrom tensorflow.python.ops.numerics import *\nfrom tensorflow.python.ops.parsing_ops import *\nfrom tensorflow.python.ops.partitioned_variables import *\nfrom tensorflow.python.ops.random_ops import *\nfrom tensorflow.python.ops.script_ops import py_func\nfrom tensorflow.python.ops.session_ops import *\nfrom tensorflow.python.ops.sparse_ops import *\nfrom tensorflow.python.ops.state_ops 
import assign\nfrom tensorflow.python.ops.state_ops import assign_add\nfrom tensorflow.python.ops.state_ops import assign_sub\nfrom tensorflow.python.ops.state_ops import count_up_to\nfrom tensorflow.python.ops.state_ops import scatter_add\nfrom tensorflow.python.ops.state_ops import scatter_div\nfrom tensorflow.python.ops.state_ops import scatter_mul\nfrom tensorflow.python.ops.state_ops import scatter_sub\nfrom tensorflow.python.ops.state_ops import scatter_min\nfrom tensorflow.python.ops.state_ops import scatter_max\nfrom tensorflow.python.ops.state_ops import scatter_update\nfrom tensorflow.python.ops.state_ops import scatter_nd_add\nfrom tensorflow.python.ops.state_ops import scatter_nd_sub\n# TODO(simister): Re-enable once binary size increase due to scatter_nd\n# ops is under control.\n# from tensorflow.python.ops.state_ops import scatter_nd_mul\n# from tensorflow.python.ops.state_ops import scatter_nd_div\nfrom tensorflow.python.ops.state_ops import scatter_nd_update\nfrom tensorflow.python.ops.string_ops import *\nfrom tensorflow.python.ops.template import *\nfrom tensorflow.python.ops.tensor_array_ops import *\nfrom tensorflow.python.ops.variable_scope import *\nfrom tensorflow.python.ops.variables import *\n# pylint: enable=wildcard-import\n# pylint: enable=g-bad-import-order\n\n#### For use in remove_undocumented below:\nfrom tensorflow.python.framework import constant_op as _constant_op\nfrom tensorflow.python.ops import array_ops as _array_ops\nfrom tensorflow.python.ops import check_ops as _check_ops\nfrom tensorflow.python.ops import clip_ops as _clip_ops\nfrom tensorflow.python.ops import confusion_matrix as _confusion_matrix\nfrom tensorflow.python.ops import control_flow_ops as _control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops as _data_flow_ops\nfrom tensorflow.python.ops import functional_ops as _functional_ops\nfrom tensorflow.python.ops import gradients as _gradients\nfrom tensorflow.python.ops import histogram_ops as _histogram_ops\nfrom tensorflow.python.ops import init_ops as _init_ops\nfrom tensorflow.python.ops import io_ops as _io_ops\nfrom tensorflow.python.ops import linalg_ops as _linalg_ops\nfrom tensorflow.python.ops import logging_ops as _logging_ops\nfrom tensorflow.python.ops import manip_ops as _manip_ops\nfrom tensorflow.python.ops import math_ops as _math_ops\nfrom tensorflow.python.ops import numerics as _numerics\nfrom tensorflow.python.ops import parsing_ops as _parsing_ops\nfrom tensorflow.python.ops import partitioned_variables as _partitioned_variables\nfrom tensorflow.python.ops import random_ops as _random_ops\nfrom tensorflow.python.ops import script_ops as _script_ops\nfrom tensorflow.python.ops import session_ops as _session_ops\nfrom tensorflow.python.ops import sparse_ops as _sparse_ops\nfrom tensorflow.python.ops import special_math_ops as _special_math_ops\nfrom tensorflow.python.ops import state_ops as _state_ops\nfrom tensorflow.python.ops import string_ops as _string_ops\nfrom tensorflow.python.ops import template as _template\nfrom tensorflow.python.ops import tensor_array_ops as _tensor_array_ops\nfrom tensorflow.python.ops import variable_scope as _variable_scope\nfrom tensorflow.python.ops import variables as _variables\n\n\n_allowed_symbols_math_ops = [\n # TODO(drpng): decide if we want to reference these in the documentation.\n \"reduced_shape\",\n \"sparse_segment_mean_grad\",\n \"sparse_segment_sqrt_n_grad\",\n\n # Legacy: will be removed.\n \"arg_max\",\n \"arg_min\",\n \"lin_space\",\n \"sparse_matmul\", # 
Use tf.matmul.\n # Deprecated (see versions.h):\n \"batch_fft\",\n \"batch_fft2d\",\n \"batch_fft3d\",\n \"batch_ifft\",\n \"batch_ifft2d\",\n \"batch_ifft3d\",\n \"mul\", # use tf.multiply instead.\n \"neg\", # use tf.negative instead.\n \"sub\", # use tf.subtract instead.\n\n # These are documented in nn.\n # We are not importing nn because it would create a circular dependency.\n \"sigmoid\",\n \"log_sigmoid\",\n \"tanh\",\n]\n\n_allowed_symbols_array_ops = [\n # TODO(drpng): make sure they are documented.\n # Scalars:\n \"NEW_AXIS\",\n \"SHRINK_AXIS\",\n \"newaxis\",\n\n # Documented in training.py.\n # I do not import train, to avoid circular dependencies.\n # TODO(drpng): this is defined in gen_array_ops, clearly not the right\n # place.\n \"stop_gradient\",\n\n # See gen_docs_combined for tf.copy documentation.\n \"copy\",\n\n ## TODO(drpng): make them inaccessible directly.\n ## TODO(drpng): Below, to-doc means that we need to find an appropriate\n ## documentation section to reference.\n ## For re-exporting to tf.*:\n \"constant\",\n \"edit_distance\", # to-doc\n # From gen_array_ops:\n \"copy_host\", # to-doc\n \"immutable_const\", # to-doc\n \"invert_permutation\", # to-doc\n \"quantize_and_dequantize\", # to-doc\n\n # TODO(drpng): legacy symbols to be removed.\n \"batch_matrix_diag\",\n \"batch_matrix_band_part\",\n \"batch_matrix_diag_part\",\n \"batch_matrix_set_diag\",\n]\n\n_allowed_symbols_partitioned_variables = [\n \"PartitionedVariable\", # Requires doc link.\n # Legacy.\n \"create_partitioned_variables\",\n \"variable_axis_size_partitioner\",\n \"min_max_variable_partitioner\",\n \"fixed_size_partitioner\",\n]\n\n_allowed_symbols_control_flow_ops = [\n # TODO(drpng): Find a place in the documentation to reference these or\n # remove.\n \"control_trigger\",\n \"loop_cond\",\n \"merge\",\n \"switch\",\n]\n\n_allowed_symbols_functional_ops = [\n \"nest\", # Used by legacy code.\n]\n\n_allowed_symbols_gradients = [\n # Documented in training.py:\n # Not importing training.py to avoid complex graph dependencies.\n \"AggregationMethod\",\n \"GradientTape\",\n \"custom_gradient\",\n \"gradients\", # tf.gradients = gradients.gradients\n \"hessians\",\n]\n\n_allowed_symbols_clip_ops = [\n # Documented in training.py:\n # Not importing training.py to avoid complex graph dependencies.\n \"clip_by_average_norm\",\n \"clip_by_global_norm\",\n \"clip_by_norm\",\n \"clip_by_value\",\n \"global_norm\",\n]\n\n_allowed_symbols_logging_ops = [\n # Documented in training.py.\n # We are not importing training.py to avoid complex dependencies.\n \"audio_summary\",\n \"histogram_summary\",\n \"image_summary\",\n \"merge_all_summaries\",\n \"merge_summary\",\n \"scalar_summary\",\n\n # TODO(drpng): link in training.py if it should be documented.\n \"get_summary_op\",\n]\n\n_allowed_symbols_variable_scope_ops = [\n \"get_local_variable\", # Documented in framework package.\n]\n\n_allowed_symbols_misc = [\n \"deserialize_many_sparse\",\n \"parse_single_sequence_example\",\n \"serialize_many_sparse\",\n \"serialize_sparse\",\n \"confusion_matrix\",\n]\n\n_allowed_symbols = (_allowed_symbols_array_ops +\n _allowed_symbols_clip_ops +\n _allowed_symbols_control_flow_ops +\n _allowed_symbols_functional_ops +\n _allowed_symbols_gradients +\n _allowed_symbols_logging_ops +\n _allowed_symbols_math_ops +\n _allowed_symbols_variable_scope_ops +\n _allowed_symbols_misc +\n _allowed_symbols_partitioned_variables)\n\nremove_undocumented(__name__, _allowed_symbols, [\n _sys.modules[__name__],\n _array_ops,\n 
_check_ops,\n _clip_ops,\n _confusion_matrix,\n _control_flow_ops,\n _constant_op,\n _data_flow_ops,\n _functional_ops,\n _gradients,\n _histogram_ops,\n _init_ops,\n _io_ops,\n _linalg_ops,\n _logging_ops,\n _manip_ops,\n _math_ops,\n _numerics,\n _parsing_ops,\n _partitioned_variables,\n _random_ops,\n _script_ops,\n _session_ops,\n _sparse_ops,\n _special_math_ops,\n _state_ops,\n _string_ops,\n _template,\n _tensor_array_ops,\n _variable_scope,\n _variables,\n])\n",
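# A minimal sketch of the allowlist idea behind the remove_undocumented call\n# above (the real helper is tensorflow.python.util.all_util.remove_undocumented;\n# prune_module below is an illustrative name, not the actual API): every public\n# attribute of the module that is not explicitly allowed gets deleted.\nimport types\n\ndef prune_module(module, allowed_symbols):\n  # Delete public attributes that are not explicitly allowed.\n  for name in list(vars(module)):\n    if not name.startswith(\"_\") and name not in allowed_symbols:\n      delattr(module, name)\n\n_demo = types.ModuleType(\"demo\")\n_demo.keep, _demo.drop = 1, 2\nprune_module(_demo, [\"keep\"])\nassert hasattr(_demo, \"keep\") and not hasattr(_demo, \"drop\")\n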
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Generate a series of TensorFlow graphs that become tflite test cases.\n\nUsage:\n\ngenerate_examples <output directory>\n\nbazel run //tensorflow/contrib/lite/testing:generate_examples\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport itertools\nimport os\nimport re\nimport sys\nimport tempfile\nimport traceback\nimport zipfile\nimport numpy as np\nfrom six import StringIO\nfrom six.moves import xrange\n\n# TODO(aselle): Disable GPU for now\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n# pylint: disable=g-import-not-at-top\nimport tensorflow as tf\nfrom google.protobuf import text_format\n# TODO(aselle): switch to TensorFlow's resource_loader\nfrom tensorflow.contrib.lite.testing import generate_examples_report as report_lib\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.ops import rnn\n\nparser = argparse.ArgumentParser(description=\"Script to generate TFLite tests.\")\nparser.add_argument(\"output_path\",\n help=\"Directory where the outputs will be go.\")\nparser.add_argument(\"--zip_to_output\",\n type=str,\n help=\"Particular zip to output.\",\n required=False)\nparser.add_argument(\"--toco\",\n type=str,\n help=\"Path to toco tool.\",\n required=True)\nparser.add_argument(\n \"--known_bugs_are_errors\",\n action=\"store_true\",\n help=(\"If a particular model is affected by a known bug,\"\n \" count it as a toco error.\"))\nparser.add_argument(\n \"--ignore_toco_errors\",\n action=\"store_true\",\n help=\"Raise an exception if any toco error is encountered.\")\nparser.add_argument(\n \"--save_graphdefs\",\n action=\"store_true\",\n help=\"Include intermediate graphdefs in the output zip files.\")\n\n\nRANDOM_SEED = 342\nTEST_INPUT_DEPTH = 3\n\n\n# A map from regular expression to bug number. 
Any test failure with label\n# matching the expression will be considered due to the corresponding bug.\nKNOWN_BUGS = {\n    # TOCO doesn't support scalars as input.\n    r\"relu.*input_shape=\\[\\]\": \"67587484\",\n    r\"sigmoid.*input_shape=\\[\\]\": \"67645668\",\n    # Concat doesn't work with a single input tensor\n    r\"concat.*num_tensors=1\": \"67378344\",\n    # Transposition in MatMul is not supported.\n    r\"fully_connected.*transpose_.=True\": \"67586970\",\n    # Softmax graphs are too complex.\n    r\"softmax.*dim=0\": \"67749831\",\n    r\"softmax.*input_shape=\\[1,3,4,3\\]\": \"67749831\",\n    # SpaceToDepth only supports float32.\n    r\"space_to_depth.*(float16|int32|uint8|int64)\": \"68018134\",\n    # BatchToSpaceND only supports 4D tensors.\n    r\"batch_to_space_nd.*input_shape=\\[8,2,2,2,1,1\\]\": \"70594733\",\n    # Div will use floordiv.\n    r\"div.*int32\": \"72051395\",\n    # TOCO requires matching dimensions in strided_slice.\n    r\"strided_slice.*begin=\\[0\\].*end=\\[1\\].*\": \"73170889\",\n    # No support for SplitV\n    r\"split.*num_or_size_splits=\\[2,2\\]\": \"73377559\",\n    # Needs support for dimensions other than the last one in argmax.\n    r\"arg_max.*axis=0.*\": \"77546240\",\n    r\"arg_max.*axis=1.*\": \"77546240\",\n    r\"arg_max.*axis=2.*\": \"77546240\",\n}\n\n\nclass ExtraTocoOptions(object):\n  \"\"\"Additional toco options besides input, output, shape.\"\"\"\n\n  def __init__(self):\n    # Whether to ignore control dependency nodes.\n    self.drop_control_dependency = False\n    # Allow custom ops in the toco conversion.\n    self.allow_custom_ops = False\n    # Rnn states that are used to support rnn / lstm cells.\n    self.rnn_states = None\n\n\ndef toco_options(data_types,\n                 input_arrays,\n                 output_arrays,\n                 shapes,\n                 extra_toco_options=ExtraTocoOptions()):\n  \"\"\"Create TOCO options to process a model.\n\n  Args:\n    data_types: input and inference types used by TOCO.\n    input_arrays: names of the input tensors\n    output_arrays: names of the output tensors\n    shapes: shapes of the input tensors\n    extra_toco_options: additional toco options\n  Returns:\n    the options in a string.\n  \"\"\"\n  shape_str = \":\".join([\",\".join(str(y) for y in x) for x in shapes])\n  inference_type = \"FLOAT\"\n  # TODO(ahentz): if we get multi-input quantization to work we need this\n  # to change\n  if data_types[0] == \"QUANTIZED_UINT8\":\n    inference_type = \"QUANTIZED_UINT8\"\n  s = (\" --input_data_types=%s\" % \",\".join(data_types) +\n       \" --inference_type=%s\" % inference_type +\n       \" --input_format=TENSORFLOW_GRAPHDEF\" + \" --output_format=TFLITE\" +\n       \" --input_arrays=%s\" % \",\".join(input_arrays) +\n       \" --input_shapes=%s\" % shape_str +\n       \" --output_arrays=%s\" % \",\".join(output_arrays))\n  if extra_toco_options.drop_control_dependency:\n    s += \" --drop_control_dependency\"\n  if extra_toco_options.allow_custom_ops:\n    s += \" --allow_custom_ops\"\n  if extra_toco_options.rnn_states:\n    s += (\" --rnn_states='\" + extra_toco_options.rnn_states + \"'\")\n  return s\n\n\ndef write_examples(fp, examples):\n  \"\"\"Given a list `examples`, write a text format representation.\n\n  The file format is CSV-like with a simple repeated pattern. 
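# A minimal sketch of how KNOWN_BUGS is consumed later in this file: when a\n# conversion fails, the test label is matched against each regular expression\n# with re.search, and a match downgrades the failure to a known bug. The\n# pattern and label below are illustrative.\nimport re\n_known = {r\"relu.*input_shape=\\[\\]\": \"67587484\"}\n_label = \"relu,input_shape=[]\"\n_matched = [bug for pattern, bug in _known.items() if re.search(pattern, _label)]\nassert _matched == [\"67587484\"]\n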
We would like\n  to use proto here, but we can't yet due to interfacing with the Android\n  team using this format.\n\n  Args:\n    fp: File-like object to write to.\n    examples: Example dictionary consisting of keys \"inputs\" and \"outputs\"\n  \"\"\"\n\n  def write_tensor(fp, x):\n    \"\"\"Write tensor in file format supported by TFLITE example.\"\"\"\n    fp.write(\"dtype,%s\\n\" % x.dtype)\n    fp.write(\"shape,\" + \",\".join(map(str, x.shape)) + \"\\n\")\n    # Output 9 digits after the point to ensure the precision is good enough.\n    values = [\"{:.9f}\".format(value) for value in list(x.flatten())]\n    fp.write(\"values,\" + \",\".join(values) + \"\\n\")\n\n  fp.write(\"test_cases,%d\\n\" % len(examples))\n  for example in examples:\n    fp.write(\"inputs,%d\\n\" % len(example[\"inputs\"]))\n    for i in example[\"inputs\"]:\n      write_tensor(fp, i)\n    fp.write(\"outputs,%d\\n\" % len(example[\"outputs\"]))\n    for i in example[\"outputs\"]:\n      write_tensor(fp, i)\n\n\ndef write_test_cases(fp, model_name, examples):\n  \"\"\"Given a dictionary of `examples`, write a text format representation.\n\n  The file format is protocol-buffer-like, even though we don't use proto due\n  to the needs of the Android team.\n\n  Args:\n    fp: File-like object to write to.\n    model_name: Filename where the model was written to, relative to filename.\n    examples: Example dictionary consisting of keys \"inputs\" and \"outputs\"\n  \"\"\"\n\n  fp.write(\"load_model: %s\\n\" % os.path.basename(model_name))\n  for example in examples:\n    fp.write(\"reshape {\\n\")\n    for t in example[\"inputs\"]:\n      fp.write(\"  input: \\\"\" + \",\".join(map(str, t.shape)) + \"\\\"\\n\")\n    fp.write(\"}\\n\")\n    fp.write(\"invoke {\\n\")\n\n    for t in example[\"inputs\"]:\n      values = [\"{:.9f}\".format(value) for value in list(t.flatten())]\n      fp.write(\"  input: \\\"\" + \",\".join(values) + \"\\\"\\n\")\n    for t in example[\"outputs\"]:\n      values = [\"{:.9f}\".format(value) for value in list(t.flatten())]\n      fp.write(\"  output: \\\"\" + \",\".join(values) + \"\\\"\\n\")\n    fp.write(\"}\\n\")\n\n\n_TF_TYPE_INFO = {\n    tf.float32: (np.float32, \"FLOAT\"),\n    tf.float16: (np.float16, \"FLOAT\"),\n    tf.int32: (np.int32, \"INT32\"),\n    tf.uint8: (np.uint8, \"QUANTIZED_UINT8\"),\n    tf.int64: (np.int64, \"INT64\"),\n}\n\n\ndef create_tensor_data(dtype, shape, min_value=-100, max_value=100):\n  \"\"\"Build tensor data spreading the range [min_value, max_value).\"\"\"\n\n  if dtype in _TF_TYPE_INFO:\n    dtype = _TF_TYPE_INFO[dtype][0]\n\n  if dtype in (tf.float32, tf.float16):\n    value = (max_value-min_value)*np.random.random_sample(shape)+min_value\n  elif dtype in (tf.int32, tf.uint8, tf.int64):\n    value = np.random.randint(min_value, max_value+1, shape)\n  return value.astype(dtype)\n\n\ndef freeze_graph(session, outputs):\n  \"\"\"Freeze the current graph.\n\n  Args:\n    session: TensorFlow session containing the graph\n    outputs: List of output tensors\n\n  Returns:\n    The frozen graph_def.\n  \"\"\"\n  return tf_graph_util.convert_variables_to_constants(\n      session, session.graph.as_graph_def(), [x.op.name for x in outputs])\n\n\ndef make_control_dep_tests(zip_path):\n  \"\"\"Make a set of tests that use control dependencies.\"\"\"\n\n  test_parameters = [{\n      \"input_shape\": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],\n  }]\n\n  def build_graph(parameters):\n    input_tensor = tf.placeholder(\n        dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n    filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)\n    assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)\n    with 
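# A minimal sketch of the text that write_examples above produces for one\n# float32 example; the expected file contents appear in the trailing comments\n# (values chosen for illustration).\nimport numpy as np\nfrom six import StringIO\n_buf = StringIO()\nwrite_examples(_buf, [{\"inputs\": [np.ones((2,), np.float32)],\n                       \"outputs\": [np.zeros((2,), np.float32)]}])\n# _buf.getvalue() begins:\n#   test_cases,1\n#   inputs,1\n#   dtype,float32\n#   shape,2\n#   values,1.000000000,1.000000000\n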
tf.control_dependencies([assert_op]):\n      out = tf.nn.conv2d(input_tensor, filter_value,\n                         strides=(1, 1, 1, 1), padding=\"SAME\")\n      return [input_tensor], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input_values = create_tensor_data(tf.float32, parameters[\"input_shape\"])\n    return [input_values], sess.run(\n        outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n  extra_toco_options = ExtraTocoOptions()\n  extra_toco_options.drop_control_dependency = True\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs,\n                    extra_toco_options)\n\n\ndef toco_convert(graph_def_str, input_tensors, output_tensors,\n                 extra_toco_options):\n  \"\"\"Convert a model's graph def into a tflite model.\n\n  NOTE: this currently shells out to the toco binary, but we would like to\n  convert to Python API tooling in the future.\n\n  Args:\n    graph_def_str: Graph def proto in serialized string format.\n    input_tensors: List of input tensor tuples `(name, shape, type)`.\n    output_tensors: List of output tensors (names).\n    extra_toco_options: Additional toco options.\n\n  Returns:\n    output tflite model, log_txt from conversion\n    or None, log_txt if it did not convert properly.\n  \"\"\"\n  data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]\n  opts = toco_options(\n      data_types=data_types,\n      input_arrays=[x[0] for x in input_tensors],\n      shapes=[x[1] for x in input_tensors],\n      output_arrays=output_tensors,\n      extra_toco_options=extra_toco_options)\n\n  with tempfile.NamedTemporaryFile() as graphdef_file, \\\n       tempfile.NamedTemporaryFile() as output_file, \\\n       tempfile.NamedTemporaryFile(\"w+\") as stdout_file:\n    graphdef_file.write(graph_def_str)\n    graphdef_file.flush()\n\n    # TODO(aselle): Switch this to subprocess at some point.\n    cmd = (\"%s --input_file=%s --output_file=%s %s > %s 2>&1\" %\n           (bin_path, graphdef_file.name, output_file.name, opts,\n            stdout_file.name))\n    exit_code = os.system(cmd)\n    log = (\n        cmd + \" exited with code %d\" % exit_code + \"\\n------------------\\n\" +\n        stdout_file.read())\n    return (None if exit_code != 0 else output_file.read()), log\n\n\ndef normalize_output_name(output_name):\n  \"\"\"Remove :0 suffix from tensor names.\"\"\"\n  return output_name.split(\":\")[0] if output_name.endswith(\n      \":0\") else output_name\n\n\ndef make_zip_of_tests(zip_path,\n                      test_parameters,\n                      make_graph,\n                      make_test_inputs,\n                      extra_toco_options=ExtraTocoOptions(),\n                      use_frozen_graph=False):\n  \"\"\"Helper to make a zip file of a bunch of TensorFlow models.\n\n  This does a cartesian product of the dictionary of test_parameters and\n  calls make_graph() for each item in the cartesian product set.\n  If the graph is built successfully, then make_test_inputs() is called to\n  build expected input/output value pairs. The model is then converted to tflite\n  with toco, and the examples are serialized with the tflite model into a zip\n  file (2 files per item in the cartesian product set).\n\n  Args:\n    zip_path: Path of zip file to write\n    test_parameters: Dictionary mapping to lists for each parameter.\n      e.g. 
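# Quick self-checks for normalize_output_name above: only a trailing \":0\" is\n# stripped; any other output index is kept verbatim.\nassert normalize_output_name(\"add:0\") == \"add\"\nassert normalize_output_name(\"add:1\") == \"add:1\"\nassert normalize_output_name(\"add\") == \"add\"\n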
`{\"strides\": [[1,3,3,1], [1,2,2,1]], \"foo\": [1.2, 1.3]}`\n make_graph: function that takes current parameters and returns tuple\n `[input1, input2, ...], [output1, output2, ...]`\n make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,\n `output_tensors` and returns tuple `(input_values, output_values)`.\n extra_toco_options: Additional toco options.\n use_frozen_graph: Whether or not freeze graph before toco converter.\n\n Raises:\n RuntimeError: if there are toco errors that can't be ignored.\n \"\"\"\n\n # TODO(aselle): Make this allow multiple inputs outputs.\n archive = zipfile.PyZipFile(zip_path, \"w\")\n zip_manifest = []\n convert_report = []\n toco_errors = 0\n for parameters in test_parameters:\n keys = parameters.keys()\n for curr in itertools.product(*parameters.values()):\n label = zip_path.replace(\".zip\", \"\") + (\",\".join(\n \"%s=%r\" % z for z in sorted(zip(keys, curr))).replace(\" \", \"\"))\n if label[0] == \"/\":\n label = label[1:]\n param_dict = dict(zip(keys, curr))\n\n def build_example(label, param_dict_real):\n \"\"\"Build the model with parameter values set in param_dict_real.\n\n Args:\n label: Label of the model (i.e. the filename in the zip).\n param_dict_real: Parameter dictionary (arguments to the factories\n make_graph and make_test_inputs)\n Returns:\n (tflite_model_binary, report) where tflite_model_binary is the\n serialized flatbuffer as a string and report is a dictionary with\n keys `toco_log` (log of toco conversion), `tf_log` (log of tf\n conversion), `toco` (a string of success status of the conversion),\n `tf` (a string success status of the conversion).\n \"\"\"\n\n np.random.seed(RANDOM_SEED)\n report = {\"toco\": report_lib.NOTRUN, \"tf\": report_lib.FAILED}\n\n # Build graph\n report[\"tf_log\"] = \"\"\n report[\"toco_log\"] = \"\"\n tf.reset_default_graph()\n\n with tf.device(\"/cpu:0\"):\n try:\n inputs, outputs = make_graph(param_dict_real)\n except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,\n ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n\n sess = tf.Session()\n try:\n baseline_inputs, baseline_outputs = (make_test_inputs(\n param_dict_real, sess, inputs, outputs))\n except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,\n ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n report[\"toco\"] = report_lib.FAILED\n report[\"tf\"] = report_lib.SUCCESS\n # Convert graph to toco\n input_tensors = [(input_tensor.name.split(\":\")[0],\n input_tensor.get_shape(), input_tensor.dtype)\n for input_tensor in inputs]\n output_tensors = [normalize_output_name(out.name) for out in outputs]\n graph_def = freeze_graph(\n sess,\n tf.global_variables() + inputs +\n outputs) if use_frozen_graph else sess.graph_def\n tflite_model_binary, toco_log = toco_convert(\n graph_def.SerializeToString(), input_tensors, output_tensors,\n extra_toco_options)\n report[\"toco\"] = (report_lib.SUCCESS if tflite_model_binary is not None\n else report_lib.FAILED)\n report[\"toco_log\"] = toco_log\n\n if FLAGS.save_graphdefs:\n archive.writestr(label + \".pb\",\n text_format.MessageToString(graph_def),\n zipfile.ZIP_DEFLATED)\n\n if tflite_model_binary:\n archive.writestr(label + \".bin\", tflite_model_binary,\n zipfile.ZIP_DEFLATED)\n example = {\"inputs\": baseline_inputs, \"outputs\": baseline_outputs}\n\n example_fp = StringIO()\n write_examples(example_fp, [example])\n archive.writestr(label + \".inputs\",\n example_fp.getvalue(), 
zipfile.ZIP_DEFLATED)\n\n          example_fp2 = StringIO()\n          write_test_cases(example_fp2, label + \".bin\", [example])\n          archive.writestr(label + \"_tests.txt\",\n                           example_fp2.getvalue(), zipfile.ZIP_DEFLATED)\n\n          zip_manifest.append(label + \"\\n\")\n\n        return tflite_model_binary, report\n\n      _, report = build_example(label, param_dict)\n\n      if report[\"toco\"] == report_lib.FAILED:\n        ignore_error = False\n        if not FLAGS.known_bugs_are_errors:\n          for pattern, bug_number in KNOWN_BUGS.items():\n            if re.search(pattern, label):\n              print(\"Ignored TOCO error due to bug %s\" % bug_number)\n              ignore_error = True\n        if not ignore_error:\n          toco_errors += 1\n          print(\"-----------------\\ntoco error!\\n%s\\n-----------------\\n\" %\n                report[\"toco_log\"])\n\n      convert_report.append((param_dict, report))\n  report_io = StringIO()\n  report_lib.make_report_table(report_io, zip_path, convert_report)\n  archive.writestr(\"report.html\", report_io.getvalue())\n\n  archive.writestr(\"manifest.txt\", \"\".join(zip_manifest), zipfile.ZIP_DEFLATED)\n\n  # Log statistics of what succeeded\n  total_conversions = len(convert_report)\n  tf_success = sum(1 for x in convert_report\n                   if x[1][\"tf\"] == report_lib.SUCCESS)\n  toco_success = sum(1 for x in convert_report\n                     if x[1][\"toco\"] == report_lib.SUCCESS)\n  percent = 0\n  if tf_success > 0:\n    percent = float(toco_success) / float(tf_success) * 100.\n  tf.logging.info((\"Archive %s Considered %d graphs, %d TF evaluated graphs \"\n                   \" and %d TOCO converted graphs (%.1f%%)\"), zip_path,\n                  total_conversions, tf_success, toco_success, percent)\n\n  if not FLAGS.ignore_toco_errors and toco_errors > 0:\n    raise RuntimeError(\n        \"Found %d errors while generating toco models\" % toco_errors)\n\n\ndef make_pool_tests(pool_op_in):\n  \"\"\"Make a set of tests to do pooling.\n\n  Args:\n    pool_op_in: TensorFlow pooling operation to test, e.g. `tf.nn.avg_pool`.\n\n  Returns:\n    A function that generates the test cases (with pool_op_in curried in).\n  \"\"\"\n\n  pool_op = pool_op_in\n\n  def f(zip_path):\n    \"\"\"Actual function that generates examples.\n\n    Args:\n      zip_path: path to write zip to.\n    \"\"\"\n\n    # Choose a set of parameters\n    test_parameters = [{\n        \"ksize\": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],\n        \"strides\": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],\n        # TODO(aselle): should add in a degenerate shape (e.g. 
[1, 0, 1, 1]).\n        \"input_shape\": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],\n        \"padding\": [\"SAME\", \"VALID\"],\n        \"data_format\": [\"NHWC\"],  # TODO(aselle): NCHW would be good\n    }]\n\n    def build_graph(parameters):\n      input_tensor = tf.placeholder(\n          dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n      out = pool_op(\n          input_tensor,\n          ksize=parameters[\"ksize\"],\n          strides=parameters[\"strides\"],\n          data_format=parameters[\"data_format\"],\n          padding=parameters[\"padding\"])\n      return [input_tensor], [out]\n\n    def build_inputs(parameters, sess, inputs, outputs):\n      input_values = create_tensor_data(tf.float32, parameters[\"input_shape\"])\n      return [input_values], sess.run(\n          outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n    make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n  return f\n\n\ndef make_l2_pool_tests(zip_path):\n  make_pool_tests(make_l2_pool)(zip_path)\n\n\ndef make_avg_pool_tests(zip_path):\n  make_pool_tests(tf.nn.avg_pool)(zip_path)\n\n\ndef make_max_pool_tests(zip_path):\n  make_pool_tests(tf.nn.max_pool)(zip_path)\n\n\ndef make_relu_tests(zip_path):\n  \"\"\"Make a set of tests to do relu.\"\"\"\n\n  # Choose a set of parameters\n  test_parameters = [{\n      \"input_shape\": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],\n                      [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n  }]\n\n  def build_graph(parameters):\n    input_tensor = tf.placeholder(\n        dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n    out = tf.nn.relu(input_tensor)\n    return [input_tensor], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input_values = create_tensor_data(\n        np.float32, parameters[\"input_shape\"], min_value=-4, max_value=10)\n    return [input_values], sess.run(\n        outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_relu1_tests(zip_path):\n  \"\"\"Make a set of tests to do relu1.\"\"\"\n\n  # Choose a set of parameters\n  test_parameters = [{\n      \"input_shape\": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n  }]\n\n  def build_graph(parameters):\n    input_tensor = tf.placeholder(\n        dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n    # Note that the following is not supported:\n    #   out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))\n    out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))\n    return [input_tensor], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input_values = create_tensor_data(\n        np.float32, parameters[\"input_shape\"], min_value=-3, max_value=10)\n    return [input_values], sess.run(\n        outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_relu6_tests(zip_path):\n  \"\"\"Make a set of tests to do relu6.\"\"\"\n\n  # Choose a set of parameters\n  test_parameters = [{\n      \"input_shape\": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n  }]\n\n  def build_graph(parameters):\n    input_tensor = tf.placeholder(\n        dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n    out = tf.nn.relu6(input_tensor)\n    return [input_tensor], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input_values = create_tensor_data(\n        np.float32, parameters[\"input_shape\"], min_value=-3, max_value=10)\n    return [input_values], sess.run(\n        outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n  make_zip_of_tests(zip_path, 
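# The relu1 composition used above, checked against hand-computed values in\n# NumPy (illustrative).\nimport numpy as np\n_x = np.array([-3.0, -0.5, 0.5, 3.0])\n_relu1 = np.minimum(1.0, np.maximum(_x, -1.0))\nassert np.array_equal(_relu1, np.array([-1.0, -0.5, 0.5, 1.0]))\n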
test_parameters, build_graph, build_inputs)\n\n\n# This function tests various TensorFlow functions that generate a Const op,\n# including `tf.ones`, `tf.zeros` and random functions.\ndef make_constant_tests(zip_path):\n  \"\"\"Make a set of tests to do constant ops.\"\"\"\n\n  test_parameters = [{\n      \"dtype\": [tf.float32, tf.int32],\n      \"input_shape\": [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],\n  }]\n\n  def build_graph(parameters):\n    # Since Toco & Tflite can't handle a graph containing only a single\n    # constant op, this test adds the constant op's output to a zero-valued\n    # placeholder tensor.\n    input1 = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input1\",\n                            shape=parameters[\"input_shape\"])\n    out = tf.ones(parameters[\"input_shape\"], dtype=parameters[\"dtype\"]) + input1\n    return [input1], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input1 = np.zeros(parameters[\"input_shape\"],\n                      dtype=_TF_TYPE_INFO[parameters[\"dtype\"]][0])\n    return [input1], sess.run(outputs, feed_dict={inputs[0]: input1})\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_binary_op_tests(zip_path, binary_operator):\n  \"\"\"Make a set of tests for a binary operator, with and without broadcast.\"\"\"\n\n  # These parameters are split because we don't support broadcasting.\n  test_parameters = [{\n      \"dtype\": [tf.float32, tf.int32],\n      \"input_shape_1\": [[1, 3, 4, 3]],\n      \"input_shape_2\": [[1, 3, 4, 3]],\n      \"activation\": [True]\n  }, {\n      \"dtype\": [tf.float32],\n      \"input_shape_1\": [[5]],\n      \"input_shape_2\": [[5]],\n      \"activation\": [False, True]\n  }, {\n      \"dtype\": [tf.float32],\n      \"input_shape_1\": [[1, 3, 4, 3]],\n      \"input_shape_2\": [[3]],\n      \"activation\": [True]\n  }]\n\n  def build_graph(parameters):\n    \"\"\"Builds the graph given the current parameters.\"\"\"\n    input1 = tf.placeholder(\n        dtype=parameters[\"dtype\"],\n        name=\"input1\",\n        shape=parameters[\"input_shape_1\"])\n    input2 = tf.placeholder(\n        dtype=parameters[\"dtype\"],\n        name=\"input2\",\n        shape=parameters[\"input_shape_2\"])\n    out = binary_operator(input1, input2)\n    if parameters[\"activation\"]:\n      out = tf.nn.relu(out)\n    return [input1, input2], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    \"\"\"Builds operand inputs for op.\"\"\"\n    input1 = create_tensor_data(parameters[\"dtype\"],\n                                parameters[\"input_shape_1\"])\n    input2 = create_tensor_data(parameters[\"dtype\"],\n                                parameters[\"input_shape_2\"])\n    return [input1, input2], sess.run(\n        outputs, feed_dict={\n            inputs[0]: input1,\n            inputs[1]: input2\n        })\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_mean_tests(zip_path):\n  \"\"\"Make a set of tests to do mean.\"\"\"\n\n  test_parameters = [{\n      \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n      \"input_shape\": [[3, 2, 4]],\n      \"axis\": [\n          None, 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],\n          [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0],\n          [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]\n      ],\n      \"const_axis\": [True, False],\n      \"keepdims\": [True, False],\n  }, {\n      \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n      \"input_shape\": [[1, 224, 224, 3]],\n      \"axis\": [\n          None, 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],\n          [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2,\n          -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],\n          [2, 2, 3], [-3, -3, -4], [-3, 2, 1]\n      ],\n      \"const_axis\": [True, False],\n      \"keepdims\": [True, False],\n  }]\n\n  def build_graph(parameters):\n    \"\"\"Build the mean 
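# The broadcast case exercised by the third parameter block above, mirrored in\n# NumPy: a [1, 3, 4, 3] operand combined with a [3] operand broadcasts over the\n# last axis.\nimport numpy as np\n_a = np.zeros((1, 3, 4, 3), np.float32)\n_b = np.ones((3,), np.float32)\nassert (_a + _b).shape == (1, 3, 4, 3)\n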
op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n # Get axis as either a placeholder or constants.\n if parameters[\"const_axis\"]:\n axis = parameters[\"axis\"]\n input_tensors = [input_tensor]\n else:\n if isinstance(parameters[\"axis\"], list):\n shape = [len(parameters[\"axis\"])]\n else:\n shape = [0] # shape for None or integers.\n axis = tf.placeholder(dtype=tf.int32, name=\"axis\", shape=shape)\n input_tensors = [input_tensor, axis]\n\n out = tf.reduce_mean(\n input_tensor, axis=axis, keepdims=parameters[\"keepdims\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"const_axis\"]:\n if parameters[\"axis\"]:\n values.append(np.array(parameters[\"axis\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_exp_tests(zip_path):\n \"\"\"Make a set of tests to do exp.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the exp op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n out = tf.exp(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"], parameters[\"input_shape\"],\n min_value=-100, max_value=9)\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_log_softmax_tests(zip_path):\n \"\"\"Make a set of tests to do log_softmax.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape\": [[1, 100], [4, 2], [5, 224]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the log_softmax op testing graph.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n out = tf.nn.log_softmax(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(\n parameters[\"input_dtype\"],\n parameters[\"input_shape\"],\n min_value=-100,\n max_value=9)\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_maximum_tests(zip_path):\n \"\"\"Make a set of tests to do maximum.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape_1\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n \"input_shape_2\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the maximum op testing graph.\"\"\"\n input_tensor_1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_1\",\n shape=parameters[\"input_shape_1\"])\n input_tensor_2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_2\",\n shape=parameters[\"input_shape_2\"])\n\n out = tf.maximum(input_tensor_1, input_tensor_2)\n return [input_tensor_1, input_tensor_2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n 
create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_1\"]),\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_2\"])\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_minimum_tests(zip_path):\n \"\"\"Make a set of tests to do minimum.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32],\n \"input_shape_1\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n \"input_shape_2\": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the minimum op testing graph.\"\"\"\n input_tensor_1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_1\",\n shape=parameters[\"input_shape_1\"])\n input_tensor_2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input_2\",\n shape=parameters[\"input_shape_2\"])\n\n out = tf.minimum(input_tensor_1, input_tensor_2)\n return [input_tensor_1, input_tensor_2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_1\"]),\n create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_2\"])\n ]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_binary_op_tests_func(binary_operator):\n \"\"\"Return a function that does a test on a binary operator.\"\"\"\n return lambda zip_path: make_binary_op_tests(zip_path, binary_operator)\n\n\ndef make_add_tests(zip_path):\n make_binary_op_tests(zip_path, tf.add)\n\n\ndef make_div_tests(zip_path):\n make_binary_op_tests(zip_path, tf.div)\n\n\ndef make_sub_tests(zip_path):\n make_binary_op_tests(zip_path, tf.subtract)\n\n\ndef make_mul_tests(zip_path):\n make_binary_op_tests(zip_path, tf.multiply)\n\n\ndef make_gather_tests(zip_path):\n \"\"\"Make a set of tests to do gather.\"\"\"\n\n test_parameters = [{\n # TODO(mgubin): add string tests when they are supported by Toco.\n # TODO(mgubin): add tests for Nd indices when they are supported by\n # TfLite.\n \"params_dtype\": [tf.float32, tf.int32],\n \"params_shape\": [[10], [1, 2, 20]],\n \"indices_dtype\": [tf.int32],\n \"indices_shape\": [[3], [5]],\n \"axis\": [0, 1],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the gather op testing graph.\"\"\"\n params = tf.placeholder(\n dtype=parameters[\"params_dtype\"],\n name=\"params\",\n shape=parameters[\"params_shape\"])\n indices = tf.placeholder(\n dtype=parameters[\"indices_dtype\"],\n name=\"indices\",\n shape=parameters[\"indices_shape\"])\n out = tf.gather(params, indices, axis=parameters[\"axis\"])\n return [params, indices], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n params = create_tensor_data(parameters[\"params_dtype\"],\n parameters[\"params_shape\"])\n indices = create_tensor_data(parameters[\"indices_dtype\"],\n parameters[\"indices_shape\"], 0,\n parameters[\"params_shape\"][0] - 1)\n return [params, indices], sess.run(\n outputs, feed_dict=dict(zip(inputs, [params, indices])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_global_batch_norm_tests(zip_path):\n \"\"\"Make a set of tests to do batch_norm_with_global_normalization.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 1, 6, 2], [3, 4, 5, 4]],\n \"epsilon\": [0.1, 0.0001],\n \"scale_after\": [True, 
False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the global batch norm testing graph.\"\"\"\n input_shape = parameters[\"input_shape\"]\n scale_shape = input_shape[3]\n\n scale = create_tensor_data(parameters[\"dtype\"], scale_shape)\n offset = create_tensor_data(parameters[\"dtype\"], scale_shape)\n mean = create_tensor_data(parameters[\"dtype\"], scale_shape)\n variance = create_tensor_data(parameters[\"dtype\"], scale_shape)\n\n x = create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n x_norm = tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, scale, offset,\n parameters[\"epsilon\"], parameters[\"scale_after\"])\n\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.add(input_tensor, x_norm)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_fused_batch_norm_tests(zip_path):\n \"\"\"Make a set of tests to do fused_batch_norm.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 1, 6, 2]],\n \"epsilon\": [0.001, 0.1],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the testing graph for fused batch normalization.\"\"\"\n input_shape = parameters[\"input_shape\"]\n scale_shape = input_shape[3]\n\n scale = create_tensor_data(parameters[\"dtype\"], scale_shape)\n offset = create_tensor_data(parameters[\"dtype\"], scale_shape)\n mean = create_tensor_data(parameters[\"dtype\"], scale_shape)\n variance = create_tensor_data(parameters[\"dtype\"], scale_shape)\n\n x = create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n [x_norm, _, _] = tf.nn.fused_batch_norm(\n x, scale, offset, mean, variance,\n parameters[\"epsilon\"], data_format=\"NHWC\", is_training=False)\n\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.add(input_tensor, x_norm)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_conv_tests(zip_path):\n \"\"\"Make a set of tests to do convolution.\"\"\"\n\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 3]],\n \"filter_shape\": [[1, 1, 3, 2]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True, False],\n },\n {\n \"input_shape\": [[2, 14, 14, 2]],\n \"filter_shape\": [[6, 6, 2, 2]],\n \"strides\": [[1, 1, 1, 1], [1, 2, 3, 1]],\n \"dilations\": [[1, 1, 1, 1], [1, 2, 2, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"], # TODO(aselle): NCHW would be good\n \"constant_filter\": [True, False],\n }\n ]\n\n def build_graph(parameters):\n \"\"\"Build a conv graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n\n # Get filter input either as a placeholder or 
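# A NumPy reference for the normalization arithmetic behind the two batch-norm\n# tests above (a sketch of the math, not the TF kernel): normalize by mean and\n# variance, then optionally scale before adding the offset.\nimport numpy as np\n\ndef _batch_norm_ref(x, mean, var, offset, scale, eps, scale_after):\n  y = (x - mean) / np.sqrt(var + eps)\n  return y * scale + offset if scale_after else y + offset\n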
constants. Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n filter_input = create_tensor_data(np.float32, parameters[\"filter_shape\"])\n input_tensors = [input_tensor]\n else:\n filter_input = tf.placeholder(\n dtype=tf.float32, name=\"filter\", shape=parameters[\"filter_shape\"])\n input_tensors = [input_tensor, filter_input]\n\n out = tf.nn.conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n dilations=parameters[\"dilations\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input) or 2 tensors\n # (input, filter) based on whether filter is constant or variable input.\n values = [create_tensor_data(np.float32, parameters[\"input_shape\"])]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, parameters[\"filter_shape\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_depthwiseconv_tests(zip_path):\n \"\"\"Make a set of tests to do convolution.\"\"\"\n\n # Tensorflow only supports equal strides\n test_parameters = [\n {\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 10, 3]],\n \"filter_size\": [[1, 1], [1, 2], [3, 3]],\n \"strides\": [[1, 1, 1, 1], [1, 3, 3, 1]],\n \"channel_multiplier\": [1, 2],\n \"rate\": [[1, 1]],\n \"padding\": [\"SAME\", \"VALID\"],\n \"data_format\": [\"NHWC\"],\n \"constant_filter\": [True, False],\n },\n {\n \"input_shape\": [[1, 3, 4, 3]],\n \"filter_size\": [[1, 1]],\n \"strides\": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]\n \"channel_multiplier\": [2],\n \"rate\": [[2, 2]], # Only [1, 1] is supported\n \"padding\": [\"SAME\"],\n \"data_format\": [\"NHWC\"],\n \"constant_filter\": [True, False],\n }\n ]\n\n def get_tensor_shapes(parameters):\n input_shape = parameters[\"input_shape\"]\n filter_size = parameters[\"filter_size\"]\n filter_shape = filter_size + [\n input_shape[3], parameters[\"channel_multiplier\"]\n ]\n return [input_shape, filter_shape]\n\n def build_graph(parameters):\n \"\"\"Build a depthwise conv graph given `parameters`.\"\"\"\n input_shape, filter_shape = get_tensor_shapes(parameters)\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=input_shape)\n\n # Get filter input either as a placeholder or constants. 
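# SAME vs. VALID output-size arithmetic for the conv cases above, following\n# TF's convention (sketch).\nimport math\n\ndef _conv_out_dim(in_dim, kernel, stride, padding):\n  if padding == \"SAME\":\n    return int(math.ceil(in_dim / float(stride)))\n  return int(math.ceil((in_dim - kernel + 1) / float(stride)))\n\nassert _conv_out_dim(14, 6, 2, \"VALID\") == 5\nassert _conv_out_dim(14, 6, 2, \"SAME\") == 7\n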
Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n filter_input = create_tensor_data(np.float32, filter_shape)\n input_tensors = [input_tensor]\n else:\n filter_input = tf.placeholder(\n dtype=tf.float32, name=\"filter\", shape=filter_shape)\n input_tensors = [input_tensor, filter_input]\n\n out = tf.nn.depthwise_conv2d(\n input_tensor,\n filter_input,\n strides=parameters[\"strides\"],\n rate=parameters[\"rate\"],\n padding=parameters[\"padding\"],\n data_format=parameters[\"data_format\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input) or 2 tensors\n # (input, filter) based on whether filter is constant or variable input.\n input_shape, filter_shape = get_tensor_shapes(parameters)\n values = [create_tensor_data(np.float32, input_shape)]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, filter_shape))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_split_tests(zip_path):\n \"\"\"Make a set of tests to do tf.split.\"\"\"\n\n test_parameters = [{\n \"input_shape\": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],\n \"num_or_size_splits\": [1, 2, 3, 4, 5, [2, 2]],\n \"axis\": [0, 1, 2, 3, -4, -3, -2, -1],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.split(\n input_tensor, parameters[\"num_or_size_splits\"], parameters[\"axis\"])\n return [input_tensor], out\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [create_tensor_data(np.float32, parameters[\"input_shape\"])]\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_concat_tests(zip_path):\n \"\"\"Make a set of tests to do concatenation.\"\"\"\n\n test_parameters = [{\n \"base_shape\": [[1, 3, 4, 3], [3, 4]],\n \"num_tensors\": [1, 2, 3, 4, 5, 6],\n \"axis\": [0, 1, 2, 3, -3, -2, -1],\n }]\n\n def get_shape(parameters, delta):\n \"\"\"Return a tweaked version of 'base_shape'.\"\"\"\n axis = parameters[\"axis\"]\n shape = parameters[\"base_shape\"][:]\n if axis < 0:\n axis += len(shape)\n if axis < len(shape):\n shape[axis] += delta\n return shape\n\n def build_graph(parameters):\n all_tensors = []\n for n in range(0, parameters[\"num_tensors\"]):\n input_tensor = tf.placeholder(dtype=tf.float32, name=(\"input%d\" % n),\n shape=get_shape(parameters, n))\n all_tensors.append(input_tensor)\n out = tf.concat(all_tensors, parameters[\"axis\"])\n return all_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n all_values = []\n for n in range(0, parameters[\"num_tensors\"]):\n input_values = create_tensor_data(np.float32,\n get_shape(parameters, n))\n all_values.append(input_values)\n return all_values, sess.run(\n outputs, feed_dict=dict(zip(inputs, all_values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_fully_connected_tests(zip_path):\n \"\"\"Make a set of tests to do fully_connected.\"\"\"\n\n test_parameters = [{\n \"shape1\": [[3, 3]],\n \"shape2\": [[3, 3]],\n \"transpose_a\": [True, False],\n \"transpose_b\": [True, False],\n \"constant_filter\": [True, False],\n }, {\n \"shape1\": [[4, 4], [1, 4], [4]],\n \"shape2\": [[4, 4], 
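# The depthwise filter-shape rule encoded by get_tensor_shapes above:\n# [height, width, in_channels, channel_multiplier], so the output has\n# in_channels * channel_multiplier channels.\n_input_shape = [1, 10, 10, 3]\n_filter_size = [3, 3]\n_channel_multiplier = 2\n_filter_shape = _filter_size + [_input_shape[3], _channel_multiplier]\nassert _filter_shape == [3, 3, 3, 2]  # 3 * 2 = 6 output channels\n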
[4, 1], [4]],\n \"transpose_a\": [False],\n \"transpose_b\": [False],\n \"constant_filter\": [True, False],\n }, {\n \"shape1\": [[40, 37]],\n \"shape2\": [[37, 40]],\n \"transpose_a\": [False],\n \"transpose_b\": [False],\n \"constant_filter\": [True, False],\n }]\n\n def build_graph(parameters):\n \"\"\"Build a matmul graph given `parameters`.\"\"\"\n input_tensor1 = tf.placeholder(dtype=tf.float32, name=\"input1\",\n shape=parameters[\"shape1\"])\n\n # Get input_tensor2 either as a placeholder or constants. Also get a list of\n # the input tensors that are represented as placeholders.\n if parameters[\"constant_filter\"]:\n input_tensor2 = create_tensor_data(np.float32, parameters[\"shape2\"])\n input_tensors = [input_tensor1]\n else:\n input_tensor2 = tf.placeholder(\n dtype=tf.float32, name=\"input2\", shape=parameters[\"shape2\"])\n input_tensors = [input_tensor1, input_tensor2]\n\n out = tf.matmul(input_tensor1, input_tensor2,\n transpose_a=parameters[\"transpose_a\"],\n transpose_b=parameters[\"transpose_b\"])\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n # Build list of input values either containing 1 tensor (input_values1) or 2\n # tensors (input_values1, input_values2) based on whether the second input\n # is a constant or variable input.\n values = [create_tensor_data(np.float32, shape=parameters[\"shape1\"])]\n if not parameters[\"constant_filter\"]:\n values.append(create_tensor_data(np.float32, parameters[\"shape2\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_l2norm_tests(zip_path):\n \"\"\"Make a set of tests to do l2norm.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],\n [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],\n \"dim\": [0, 1, 2, 3, [2, 3], -2],\n \"epsilon\": [None, 1e-12, 1e-3],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n if parameters[\"epsilon\"]:\n out = tf.nn.l2_normalize(\n input_tensor, parameters[\"dim\"], epsilon=parameters[\"epsilon\"])\n else:\n out = tf.nn.l2_normalize(input_tensor, parameters[\"dim\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], min_value=-4, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_local_response_norm_tests(zip_path):\n \"\"\"Make a set of tests to do local_response_norm.\"\"\"\n\n # Chose a set of parameters\n test_parameters = [{\n \"input_shape\": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],\n \"depth_radius\": [None, 0, 1, 3, 4, 5],\n \"bias\": [None, 0.1, 0.3, -0.1],\n \"alpha\": [None, 1, 2, -3],\n \"beta\": [None, 0.5, 0.25, 2],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=parameters[\"input_shape\"])\n out = tf.nn.local_response_normalization(\n input_tensor, depth_radius=parameters[\"depth_radius\"],\n bias=parameters[\"bias\"], alpha=parameters[\"alpha\"],\n beta=parameters[\"beta\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(\n np.float32, parameters[\"input_shape\"], 
min_value=-4, max_value=10)\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_pad_tests(zip_path):\n \"\"\"Make a set of tests to do pad.\"\"\"\n\n # TODO(nupurgarg): Add test for tf.uint8.\n test_parameters = [\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 1, 2, 1], [2, 1, 1, 1]],\n \"paddings\": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],\n [0, 0], [2, 3]]],\n \"constant_paddings\": [True, False],\n },\n # Non-4D use case.\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 2], [0, 1, 2]],\n \"paddings\": [[[0, 1], [2, 3]]],\n \"constant_paddings\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a pad graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n\n # Get paddings as either a placeholder or constants.\n if parameters[\"constant_paddings\"]:\n paddings = parameters[\"paddings\"]\n input_tensors = [input_tensor]\n else:\n shape = [len(parameters[\"paddings\"]), 2]\n paddings = tf.placeholder(dtype=tf.int32, name=\"padding\", shape=shape)\n input_tensors = [input_tensor, paddings]\n\n out = tf.pad(input_tensor, paddings=paddings)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_paddings\"]:\n values.append(np.array(parameters[\"paddings\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_reshape_tests(zip_path):\n \"\"\"Make a set of tests to do reshape.\"\"\"\n\n # All shapes below are suitable for tensors with 420 elements.\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],\n \"output_shape\": [[15, 28], [420], [1, -1, 5, 7], [-1]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.reshape(input_tensor, shape=parameters[\"output_shape\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_resize_bilinear_tests(zip_path):\n \"\"\"Make a set of tests to do resize_bilinear.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 3, 4, 3], [1, 10, 2, 1]],\n \"size\": [[1, 1], [4, 3], [2, 2], [5, 6]],\n \"align_corners\": [None, True, False],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.image.resize_bilinear(input_tensor, size=parameters[\"size\"],\n align_corners=parameters[\"align_corners\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n 
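# The paddings layout used by make_pad_tests above, mirrored with np.pad: one\n# (before, after) pair per input dimension.\nimport numpy as np\n_x = np.ones((1, 2), np.float32)\n_padded = np.pad(_x, [(0, 1), (2, 3)], mode=\"constant\")\nassert _padded.shape == (2, 7)  # rows: 1+0+1, cols: 2+2+3\n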
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_sigmoid_tests(zip_path):\n \"\"\"Make a set of tests to do sigmoid.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.sigmoid(input_tensor)\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_softmax_tests(zip_path):\n \"\"\"Make a set of tests to do softmax.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 3, 4, 3], [2, 3]],\n \"dim\": [-1, 0],\n }, {\n \"dtype\": [tf.float32],\n \"input_shape\": [[4, 7]],\n \"dim\": [-1, 1],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.nn.softmax(input_tensor, dim=parameters[\"dim\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_space_to_depth_tests(zip_path):\n \"\"\"Make a set of tests to do space_to_depth.\"\"\"\n\n test_parameters = [{\n \"dtype\": [tf.float32, tf.float16, tf.int32, tf.uint8, tf.int64],\n \"input_shape\": [[2, 12, 24, 1]],\n \"block_size\": [2, 3, 4],\n }]\n\n def build_graph(parameters):\n input_tensor = tf.placeholder(dtype=parameters[\"dtype\"], name=\"input\",\n shape=parameters[\"input_shape\"])\n out = tf.space_to_depth(input_tensor, block_size=parameters[\"block_size\"])\n return [input_tensor], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n return [input_values], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_space_to_batch_nd_tests(zip_path):\n \"\"\"Make a set of tests to do space_to_batch_nd.\"\"\"\n\n # TODO(nupurgarg): Add test for uint8.\n test_parameters = [\n {\n \"dtype\": [tf.int32, tf.int64, tf.float32],\n \"input_shape\": [[1, 2, 2, 3], [2, 2, 4, 1]],\n \"block_shape\": [[1, 3], [2, 2]],\n \"paddings\": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[2, 3, 7, 3]],\n \"block_shape\": [[1, 3], [2, 2]],\n \"paddings\": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n # Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others.\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[1, 4, 4, 4, 1, 1]],\n \"block_shape\": [[2, 2, 2]],\n \"paddings\": [[[0, 0], [0, 0], [0, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_paddings\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n 
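# Shape arithmetic for the space_to_depth cases above: each block_size x\n# block_size spatial patch moves into the channel dimension.\n_input_shape = [2, 12, 24, 1]\n_block_size = 2\n_out_shape = [_input_shape[0], _input_shape[1] // _block_size,\n              _input_shape[2] // _block_size,\n              _input_shape[3] * _block_size * _block_size]\nassert _out_shape == [2, 6, 12, 4]\n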
\"\"\"Build a space_to_batch graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n input_tensors = [input_tensor]\n\n # Get block_shape either as a const or as a placeholder (tensor).\n if parameters[\"constant_block_shape\"]:\n block_shape = parameters[\"block_shape\"]\n else:\n shape = [len(parameters[\"block_shape\"])]\n block_shape = tf.placeholder(dtype=tf.int32, name=\"shape\", shape=shape)\n input_tensors.append(block_shape)\n\n # Get paddings either as a const or as a placeholder (tensor).\n if parameters[\"constant_paddings\"]:\n paddings = parameters[\"paddings\"]\n else:\n shape = [len(parameters[\"paddings\"]), 2]\n paddings = tf.placeholder(dtype=tf.int32, name=\"paddings\", shape=shape)\n input_tensors.append(paddings)\n\n out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_block_shape\"]:\n values.append(np.array(parameters[\"block_shape\"]))\n if not parameters[\"constant_paddings\"]:\n values.append(np.array(parameters[\"paddings\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_batch_to_space_nd_tests(zip_path):\n \"\"\"Make a set of tests to do batch_to_space_nd.\"\"\"\n\n test_parameters = [\n {\n \"dtype\": [tf.float32, tf.int64, tf.int32],\n \"input_shape\": [[12, 3, 3, 1]],\n \"block_shape\": [[1, 4], [2, 2], [3, 4]],\n \"crops\": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],\n \"constant_block_shape\": [True, False],\n \"constant_crops\": [True, False],\n },\n # Non-4D use case: 1 bath dimension, 3 spatial dimensions, 2 others.\n {\n \"dtype\": [tf.float32],\n \"input_shape\": [[8, 2, 2, 2, 1, 1]],\n \"block_shape\": [[2, 2, 2]],\n \"crops\": [[[0, 0], [0, 0], [0, 0]]],\n \"constant_block_shape\": [True, False],\n \"constant_crops\": [True, False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a batch_to_space graph given `parameters`.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n input_tensors = [input_tensor]\n\n # Get block_shape either as a const or as a placeholder (tensor).\n if parameters[\"constant_block_shape\"]:\n block_shape = parameters[\"block_shape\"]\n else:\n shape = [len(parameters[\"block_shape\"])]\n block_shape = tf.placeholder(dtype=tf.int32, name=\"shape\", shape=shape)\n input_tensors.append(block_shape)\n\n # Get crops either as a const or as a placeholder (tensor).\n if parameters[\"constant_crops\"]:\n crops = parameters[\"crops\"]\n else:\n shape = [len(parameters[\"crops\"]), 2]\n crops = tf.placeholder(dtype=tf.int32, name=\"crops\", shape=shape)\n input_tensors.append(crops)\n\n out = tf.batch_to_space_nd(input_tensor, block_shape, crops)\n return input_tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n values = [\n create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n ]\n if not parameters[\"constant_block_shape\"]:\n values.append(np.array(parameters[\"block_shape\"]))\n if not parameters[\"constant_crops\"]:\n values.append(np.array(parameters[\"crops\"]))\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, 
build_graph, build_inputs)\n\n\ndef make_transpose_tests(zip_path):\n  \"\"\"Make a set of tests to do transpose.\"\"\"\n\n  # TODO(nupurgarg): Add test for uint8.\n  test_parameters = [{\n      \"dtype\": [tf.int32, tf.int64, tf.float32],\n      \"input_shape\": [[2, 2, 3]],\n      \"perm\": [[0, 1, 2], [0, 2, 1]],\n      \"constant_perm\": [True, False],\n  }, {\n      \"dtype\": [tf.float32],\n      \"input_shape\": [[1, 2, 3, 4]],\n      \"perm\": [[0, 1, 2, 3], [3, 0, 1, 2]],\n      \"constant_perm\": [True, False],\n  }, {\n      \"dtype\": [tf.float32],\n      \"input_shape\": [[1, 2, 3, 4, 5]],\n      \"perm\": [[4, 3, 2, 1, 0]],\n      \"constant_perm\": [True, False],\n  }]\n\n  def build_graph(parameters):\n    \"\"\"Build a transpose graph given `parameters`.\"\"\"\n    input_tensor = tf.placeholder(\n        dtype=parameters[\"dtype\"],\n        name=\"input\",\n        shape=parameters[\"input_shape\"])\n\n    if parameters[\"constant_perm\"]:\n      perm = parameters[\"perm\"]\n      input_tensors = [input_tensor]\n    else:\n      # perm is a 1-D tensor with one entry per input dimension.\n      shape = [len(parameters[\"perm\"])]\n      perm = tf.placeholder(dtype=tf.int32, name=\"perm\", shape=shape)\n      input_tensors = [input_tensor, perm]\n\n    out = tf.transpose(input_tensor, perm=perm)\n    return input_tensors, [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    values = [\n        create_tensor_data(parameters[\"dtype\"], parameters[\"input_shape\"])\n    ]\n    if not parameters[\"constant_perm\"]:\n      values.append(np.array(parameters[\"perm\"]))\n    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_squeeze_tests(zip_path):\n  \"\"\"Make a set of tests to do squeeze.\"\"\"\n\n  test_parameters = [{\n      \"dtype\": [tf.int32, tf.float32, tf.int64],\n      \"input_shape\": [[1, 2, 1, 3, 1, 4, 1, 1]],\n      \"axis\": [\n          None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],\n          [-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],\n          [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]\n      ],\n  }, {\n      \"dtype\": [tf.int32, tf.float32, tf.int64],\n      \"input_shape\": [[1]],\n      \"axis\": [None, [], [0], [-1]],\n  }, {\n      \"dtype\": [tf.int32, tf.float32, tf.int64],\n      \"input_shape\": [[1, 1, 1, 1, 1]],\n      \"axis\": [None, [], [0], [3, 0], [-2, 0, 3, 2]],\n  }]\n\n  def build_graph(parameters):\n    input_tensor = tf.placeholder(\n        dtype=parameters[\"dtype\"],\n        name=\"input\",\n        shape=parameters[\"input_shape\"])\n    out = tf.squeeze(input_tensor, axis=parameters[\"axis\"])\n    return [input_tensor], [out]\n\n  def build_inputs(parameters, sess, inputs, outputs):\n    input_values = create_tensor_data(parameters[\"dtype\"],\n                                      parameters[\"input_shape\"])\n    return [input_values], sess.run(\n        outputs, feed_dict=dict(zip(inputs, [input_values])))\n\n  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_strided_slice_tests(zip_path):\n  \"\"\"Make a set of tests to do strided_slice.\"\"\"\n\n  # TODO(soroosh): add test/support for uint8.\n  test_parameters = [\n      # 4-D\n      {\n          \"dtype\": [tf.float32, tf.int32, tf.int64],\n          \"index_type\": [tf.int32],\n          \"input_shape\": [[12, 2, 2, 5]],\n          \"begin\": [[0, 0, 0, 0], [1, 0, 1, 0]],\n          \"end\": [[8, 2, 2, 3], [12, 2, 2, 5]],\n          \"strides\": [None, [2, 1, 3, 1]],\n          \"begin_mask\": [None, 1, 8],\n          \"end_mask\": [None, 1, 8],\n          \"shrink_axis_mask\": [None, 1, 8, 11, 15, -1],\n          \"constant_indices\": [False, True],\n      },\n      # TODO(b/73170889) Restore test parameters removed in cl/191608113.\n      # 2-D\n      {\n          \"dtype\": [tf.float32, tf.int32, tf.int64],\n          \"index_type\": [tf.int32],\n          \"input_shape\": [[2, 3]],\n          
\"begin\": [[0, 0], [1, 0]],\n \"end\": [[2, 3], [2, 2]],\n \"strides\": [None, [2, 2]],\n \"begin_mask\": [None, 1, 2],\n \"end_mask\": [None, 1, 2],\n \"shrink_axis_mask\": [None, 1, 2, 3, -1],\n \"constant_indices\": [False, True],\n },\n # Negative strides\n {\n \"dtype\": [tf.float32],\n \"index_type\": [tf.int32],\n \"input_shape\": [[2, 3]],\n \"begin\": [[0, -1]],\n \"end\": [[2, -3]],\n \"strides\": [[1, -1]],\n \"begin_mask\": [None, 1, 2],\n \"end_mask\": [None, 1, 2],\n \"shrink_axis_mask\": [None, 1, 2, 3, -1],\n \"constant_indices\": [False],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build graph for stride_slice test.\"\"\"\n input_tensor = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n if parameters[\"constant_indices\"]:\n begin = parameters[\"begin\"]\n end = parameters[\"end\"]\n strides = parameters[\"strides\"]\n tensors = [input_tensor]\n else:\n begin = tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"begin\",\n shape=[len(parameters[\"input_shape\"])])\n end = tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"end\",\n shape=[len(parameters[\"input_shape\"])])\n strides = (\n tf.placeholder(\n dtype=parameters[\"index_type\"],\n name=\"strides\",\n shape=[len(parameters[\"input_shape\"])])\n if parameters[\"strides\"] is not None else None)\n tensors = [input_tensor, begin, end]\n if strides is not None:\n tensors.append(strides)\n out = tf.strided_slice(\n input_tensor,\n begin,\n end,\n strides,\n begin_mask=parameters[\"begin_mask\"],\n end_mask=parameters[\"end_mask\"])\n return tensors, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Build inputs for stride_slice test.\"\"\"\n input_values = create_tensor_data(parameters[\"dtype\"],\n parameters[\"input_shape\"])\n index_type = _TF_TYPE_INFO[parameters[\"index_type\"]][0]\n values = [input_values]\n if not parameters[\"constant_indices\"]:\n begin_values = np.array(parameters[\"begin\"]).astype(index_type)\n end_values = np.array(parameters[\"end\"]).astype(index_type)\n stride_values = (\n np.array(parameters[\"strides\"]).astype(index_type)\n if parameters[\"strides\"] is not None else None)\n values.append(begin_values)\n values.append(end_values)\n if stride_values is not None:\n values.append(stride_values)\n\n return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_lstm_tests(zip_path):\n \"\"\"Make a set of tests to do basic Lstm cell.\"\"\"\n\n test_parameters = [\n {\n \"dtype\": [tf.float32],\n \"num_batchs\": [1],\n \"time_step_size\": [1],\n \"input_vec_size\": [3],\n \"num_cells\": [4],\n },\n ]\n\n def build_graph(parameters):\n \"\"\"Build a simple graph with BasicLSTMCell.\"\"\"\n\n num_batchs = parameters[\"num_batchs\"]\n time_step_size = parameters[\"time_step_size\"]\n input_vec_size = parameters[\"input_vec_size\"]\n num_cells = parameters[\"num_cells\"]\n inputs_after_split = []\n for i in xrange(time_step_size):\n one_timestamp_input = tf.placeholder(\n dtype=parameters[\"dtype\"],\n name=\"split_{}\".format(i),\n shape=[num_batchs, input_vec_size])\n inputs_after_split.append(one_timestamp_input)\n # Currently lstm identifier has a few limitations: only supports\n # forget_bias == 0, inner state activiation == tanh.\n # TODO(zhixianyan): Add another test with forget_bias == 1.\n # TODO(zhixianyan): Add another test with relu as activation.\n lstm_cell = 
tf.contrib.rnn.BasicLSTMCell(\n num_cells, forget_bias=0.0, state_is_tuple=True)\n cell_outputs, _ = rnn.static_rnn(\n lstm_cell, inputs_after_split, dtype=tf.float32)\n out = cell_outputs[-1]\n return inputs_after_split, [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n \"\"\"Feed inputs, assign variables, and freeze graph.\"\"\"\n\n with tf.variable_scope(\"\", reuse=True):\n kernel = tf.get_variable(\"rnn/basic_lstm_cell/kernel\")\n bias = tf.get_variable(\"rnn/basic_lstm_cell/bias\")\n kernel_values = create_tensor_data(\n parameters[\"dtype\"], [kernel.shape[0], kernel.shape[1]], -1, 1)\n bias_values = create_tensor_data(parameters[\"dtype\"], [bias.shape[0]], 0,\n 1)\n sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))\n\n num_batchs = parameters[\"num_batchs\"]\n time_step_size = parameters[\"time_step_size\"]\n input_vec_size = parameters[\"input_vec_size\"]\n input_values = []\n for _ in xrange(time_step_size):\n tensor_data = create_tensor_data(parameters[\"dtype\"],\n [num_batchs, input_vec_size], 0, 1)\n input_values.append(tensor_data)\n out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))\n return input_values, out\n\n # TODO(zhixianyan): Automatically generate rnn_states for lstm cell.\n extra_toco_options = ExtraTocoOptions()\n extra_toco_options.rnn_states = (\n \"{state_array:rnn/BasicLSTMCellZeroState/zeros,\"\n \"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},\"\n \"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,\"\n \"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}\")\n\n make_zip_of_tests(\n zip_path,\n test_parameters,\n build_graph,\n build_inputs,\n extra_toco_options,\n use_frozen_graph=True)\n\n\ndef make_l2_pool(input_tensor, ksize, strides, padding, data_format):\n \"\"\"Given an input perform a sequence of TensorFlow ops to produce l2pool.\"\"\"\n return tf.sqrt(tf.nn.avg_pool(\n tf.square(input_tensor), ksize=ksize, strides=strides,\n padding=padding, data_format=data_format))\n\n\ndef make_topk_tests(zip_path):\n \"\"\"Make a set of tests to do topk.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[10], [5, 20]],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the topk op testing graph.\"\"\"\n input_value = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n k = tf.constant(3, name=\"k\")\n out = tf.nn.top_k(input_value, k)\n return [input_value], [out[1]]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_arg_max_tests(zip_path):\n \"\"\"Make a set of tests to do arg_max.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32],\n \"input_shape\": [[1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],\n \"axis\": [0, 1, 2, 3],\n \"output_type\": [tf.int32, tf.int64],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the arg_max op testing graph.\"\"\"\n input_value = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input\",\n shape=parameters[\"input_shape\"])\n axis = tf.constant(parameters[\"axis\"], name=\"axis\")\n out = tf.arg_max(input_value, axis, output_type=parameters[\"output_type\"])\n return [input_value], [out]\n\n def build_inputs(parameters, sess, inputs, 
outputs):\n input_value = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape\"])\n return [input_value], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n\ndef make_less_tests(zip_path):\n \"\"\"Make a set of tests to do less.\"\"\"\n\n test_parameters = [{\n \"input_dtype\": [tf.float32, tf.int32, tf.int64],\n \"input_shape_pair\": [([1, 1, 1, 3], [1, 1, 1, 3]),\n ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),\n ([5, 5], [1]), ([10], [2, 4, 10])],\n }]\n\n def build_graph(parameters):\n \"\"\"Build the less op testing graph.\"\"\"\n input_value1 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input1\",\n shape=parameters[\"input_shape_pair\"][0])\n input_value2 = tf.placeholder(\n dtype=parameters[\"input_dtype\"],\n name=\"input2\",\n shape=parameters[\"input_shape_pair\"][1])\n out = tf.less(input_value1, input_value2)\n return [input_value1, input_value2], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_value1 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][0])\n input_value2 = create_tensor_data(parameters[\"input_dtype\"],\n parameters[\"input_shape_pair\"][1])\n return [input_value1, input_value2], sess.run(\n outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))\n\n make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)\n\n# Toco binary path provided by the generate rule.\nbin_path = None\n\n\ndef main(unused_args):\n global bin_path\n def mkdir_if_not_exist(x):\n if not os.path.isdir(x):\n os.mkdir(x)\n if not os.path.isdir(x):\n raise RuntimeError(\"Failed to create dir %r\" % x)\n\n opstest_path = os.path.join(FLAGS.output_path)\n mkdir_if_not_exist(opstest_path)\n\n out = FLAGS.zip_to_output\n bin_path = FLAGS.toco\n test_function = (\"make_%s_tests\" % out.replace(\".zip\", \"\"))\n if test_function not in globals():\n raise RuntimeError(\"Can't find a test function to create %r. Tried %r\" %\n (out, test_function))\n\n # TODO(ahentz): accessing globals() is not very elegant. We should either\n # break this file into multiple tests or use decorator-based registration to\n # avoid using globals().\n globals()[test_function](os.path.join(opstest_path, out))\n\n\nif __name__ == \"__main__\":\n FLAGS, unparsed = parser.parse_known_args()\n\n if unparsed:\n print(\"Usage: %s <path out> <zip file to generate>\" % sys.argv[0])\n else:\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.python.util.all_util.remove_undocumented"
],
[
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.nn.top_k",
"tensorflow.ones",
"numpy.random.seed",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.image.resize_bilinear",
"tensorflow.concat",
"tensorflow.python.ops.rnn.static_rnn",
"tensorflow.nn.softmax",
"tensorflow.split",
"tensorflow.nn.batch_norm_with_global_normalization",
"tensorflow.assert_greater_equal",
"tensorflow.arg_max",
"tensorflow.minimum",
"tensorflow.device",
"tensorflow.nn.log_softmax",
"tensorflow.space_to_batch_nd",
"tensorflow.less",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.nn.relu",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.contrib.rnn.BasicLSTMCell",
"numpy.zeros",
"tensorflow.app.run",
"tensorflow.strided_slice",
"tensorflow.space_to_depth",
"tensorflow.global_variables",
"tensorflow.Session",
"tensorflow.contrib.lite.testing.generate_examples_report.make_report_table",
"tensorflow.reset_default_graph",
"tensorflow.nn.local_response_normalization",
"tensorflow.control_dependencies",
"tensorflow.pad",
"tensorflow.nn.l2_normalize",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.batch_to_space_nd",
"numpy.random.random_sample",
"tensorflow.logging.info",
"tensorflow.reduce_mean",
"tensorflow.add",
"tensorflow.nn.conv2d",
"tensorflow.exp",
"tensorflow.square",
"tensorflow.nn.fused_batch_norm",
"numpy.array",
"tensorflow.gather",
"numpy.random.randint",
"tensorflow.get_variable",
"tensorflow.maximum"
]
] |
neurodatascience/watts_up_compute | [
"1ed41e62690f99f699b44180208689cc19616bb7"
] | [
"run_scripts/FreeSurfer/nipype_reconall_with_tracker.py"
] | [
"# Import modules\nimport os\nimport sys\nfrom os.path import join as opj\nimport pandas as pd\nimport time\nfrom nipype.interfaces.freesurfer import ReconAll\nfrom nipype.interfaces.utility import IdentityInterface\nfrom nipype.pipeline.engine import Workflow, Node\nfrom pypapi import events, papi_high as high\nimport argparse\n\n# Add paths (singularity should see these)\n# FastSurfer and carbon trackers are in the mounted dir as these repos keep getting updated.\n# TODO replace this with setup.py once the dependencis become stable\n# sys.path.append('../../../experiment-impact-tracker/')\n# sys.path.append('../../../codecarbon/')\n\nfrom experiment_impact_tracker.compute_tracker import ImpactTracker\nfrom codecarbon import EmissionsTracker, OfflineEmissionsTracker\n\ndef get_reconall(recon_directive,fs_folder):\n # This node represents the actual recon-all command\n reconall = Node(ReconAll(directive=recon_directive,\n flags='-nuintensitycor -3T',\n subjects_dir=fs_folder),\n name=\"reconall\")\n return reconall\n \n\n# This function returns for each subject the path to struct.nii.gz\ndef pathfinder(subject, foldername, filename):\n from os.path import join as opj\n struct_path = opj(foldername, subject, filename)\n return struct_path\n\n\ndef main():\n # setup\n exp_start_time = time.time()\n \n # argparse\n parser = argparse.ArgumentParser(description='Script to run freesurfer reconall with nipype and track compute costs', epilog='$Id: fast_surfer_cnn, v 1.0 2019/09/30$')\n\n # Data\n parser.add_argument('--experiment_dir', dest='experiment_dir', help='path to directory to store freesurfer derived data.')\n parser.add_argument('--data_dir', help=\"path to input data\", default='/neurohub/ukbb/imaging/')\n parser.add_argument('--subject_id', dest='subject_id', help='subject_id')\n parser.add_argument('--T1_identifier', help='T1 identifier string relateive to the subject directory')\n\n # FreeSurfer\n parser.add_argument('--recon_directive', dest='recon_directive', help='recon_directive (autorecon 1, 2, or 3)', default='1') #MTL\n \n # Trackers\n parser.add_argument('--tracker_log_dir', dest='tracker_log_dir',\n help=\"log dir for experiment impact tracker\",\n type=str, default='./tracker_logs/')\n parser.add_argument('--geo_loc', dest='geo_loc',\n help=\"(lat,log) coords for experiment impact tracker\",\n type=str, default='45.4972159,-73.6103642') #MTL Beluga\n parser.add_argument('--CC_offline',\n help=\"Run CC in offline mode\",\n action='store_true') \n parser.add_argument('--TZ', dest='TZ',\n help=\"TimeZone\",\n type=str, default='America/New_York')\n parser.add_argument('--iso_code', dest='iso_code',\n help=\"Country ISO code\",\n type=str, default='USA')\n \n # PAPI\n parser.add_argument('--count_FLOPs', dest='count_FLOPs',help=\"Count FLOPs using PAPI\",action='store_true') \n\n args = parser.parse_args()\n\n # Data\n experiment_dir = args.experiment_dir\n data_dir = args.data_dir\n subject_id = args.subject_id\n T1_identifier = args.T1_identifier\n\n # FreeSurfer\n recon_directive = args.recon_directive\n\n # FLOPs\n count_FLOPs = args.count_FLOPs\n\n # Trackers\n tracker_log_dir = args.tracker_log_dir\n geo_loc = args.geo_loc\n CC_offline = args.CC_offline\n TZ = args.TZ\n iso_code = args.iso_code\n\n print(f'Using offline mode for CC tracker: {CC_offline}')\n if CC_offline:\n print(f'Using {TZ} timezone and {iso_code} country iso code')\n \n print(f'Starting subject: {subject_id}')\n\n # Set up the trackers\n log_dir = '{}/{}/'.format(tracker_log_dir,subject_id)\n 
log_dir_EIT = f'{log_dir}/EIT/'\n log_dir_CC = f'{log_dir}/CC/'\n\n for d in [log_dir_EIT,log_dir_CC]:\n if not os.path.exists(d):\n os.makedirs(d)\n\n # Use specified geo location for the HPC\n ly,lx = float(geo_loc.split(',')[0]), float(geo_loc.split(',')[1])\n coords = (ly,lx)\n print(f'Using geographical coordinates (long,lat): {coords}')\n\n # EIT tracker\n tracker_EIT = ImpactTracker(log_dir_EIT,coords)\n tracker_EIT.launch_impact_monitor()\n\n # CodeCarbon tracker\n os.environ['TZ']= TZ\n \n if CC_offline:\n tracker_CC = OfflineEmissionsTracker(output_dir=log_dir_CC, country_iso_code=iso_code) \n else:\n tracker_CC = EmissionsTracker(output_dir=log_dir_CC)\n \n tracker_CC.start()\n\n if count_FLOPs:\n print('Counting flops using PAPI')\n flop_csv = tracker_log_dir + 'compute_costs_flop.csv'\n flop_df = pd.DataFrame(columns=['task','start_time','duration','DP'])\n \n\n # Start FS processing for a given subject\n subject_list = [subject_id]\n\n fs_folder = opj(experiment_dir, 'freesurfer') # location of freesurfer folder\n\n # Create the output folder - FreeSurfer can only run if this folder exists\n os.system('mkdir -p %s' % fs_folder)\n\n # Specify recon workflow stages\n if recon_directive == 'all':\n recon_directives = ['autorecon1','autorecon2','autorecon3']\n else:\n recon_directives = [recon_directive] \n\n\n for r, recon_directive in enumerate(recon_directives):\n print('\\nStarting stage: {}'.format(recon_directive))\n\n # Create the pipeline that runs the recon-all command\n reconflow = Workflow(name=\"reconflow\")\n reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')\n\n # Some magical stuff happens here (not important for now)\n infosource = Node(IdentityInterface(fields=['subject_id']), name=\"infosource\")\n infosource.iterables = ('subject_id', subject_list)\n \n # Specify recon-all stage based on recon-directive\n reconall = get_reconall(recon_directive, fs_folder)\n # This section connects all the nodes of the pipeline to each other\n reconflow.connect([(infosource, reconall, [('subject_id', 'subject_id')]),\n (infosource, reconall, [(('subject_id', pathfinder,\n data_dir, T1_identifier),\n 'T1_files')]),\n ])\n \n if count_FLOPs:\n # start flop counter\n start_time = time.time()\n high.start_counters([events.PAPI_DP_OPS,]) #default: PAPI_FP_OPS\n\n # This command runs the recon-all pipeline in parallel (using n_procs cores)\n # reconflow.run('MultiProc', plugin_args={'n_procs': 4})\n reconflow.run() \n\n if count_FLOPs:\n # stop flop counter\n DP = high.stop_counters()[0]\n end_time = time.time()\n duration = end_time - start_time\n print('Duration: {}, Flops: {}'.format(duration, DP))\n\n flop_df.loc[r] = [recon_directive,start_time, duration, DP]\n\n ## code-carbon tracker\n tracker_CC.stop()\n \n if count_FLOPs:\n flop_df.to_csv(flop_csv)\n\nif __name__=='__main__':\n main()\n"
] | [
[
"pandas.DataFrame"
]
] |
strint/myia | [
"3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a"
] | [
"examples/vae.py"
] | [
"\"\"\"Example of an MLP in Myia.\n\nMyia is still a work in progress, and this example may change in the future.\n\"\"\"\n\nimport time\nfrom dataclasses import dataclass\n\nimport numpy\nimport torch\nfrom numpy.random import RandomState\nfrom torchvision import datasets, transforms\n\nimport myia.public_api as pub\nfrom myia import ArithmeticData, myia, value_and_grad\nfrom myia.api import to_device\nfrom myia.debug import traceback # noqa\nfrom myia.operations import array_exp, array_pow, random_initialize\n\n###########\n# Options #\n###########\n\n\ndtype = \"float32\"\n\nbackend = \"pytorch\"\n# backend = 'relay' # Uncomment to use relay backend\n\ndevice_type = \"cpu\"\n# device_type = 'cuda' # Uncomment to run on the gpu\n\nbackend_options_dict = {\n \"pytorch\": {\"device\": device_type},\n \"relay\": {\"target\": device_type, \"device_id\": 0},\n}\n\nbackend_options = backend_options_dict[backend]\n\n###############\n# Hyperparams #\n###############\n\n\nlr = getattr(numpy, dtype)(0.01)\n\n\n########\n# Data #\n########\n\n\n# This just generates random data so we don't have to load a real dataset,\n# but the model will work just as well on a real dataset.\n\n\ndef param(R, *size):\n \"\"\"Generates a random array using the generator R.\"\"\"\n return numpy.array(R.rand(*size) * 2 - 1, dtype=dtype)\n\n\ndef generate_data(n, batch_size, input_size, target_size, *, seed=87):\n \"\"\"Generate inputs and targets.\n\n Generates n batches of samples of size input_size, matched with\n a single target.\n \"\"\"\n R = RandomState(seed=seed)\n return [\n (param(R, batch_size, input_size), param(R, batch_size, target_size))\n for i in range(n)\n ]\n\n\ndef mlp_parameters(*layer_sizes, seed=90909):\n \"\"\"Generates parameters for a MLP given a list of layer sizes.\"\"\"\n R = RandomState(seed=seed)\n parameters = []\n for i, o in zip(layer_sizes[:-1], layer_sizes[1:]):\n W = param(R, i, o)\n b = param(R, 1, o)\n parameters.append((W, b))\n return parameters\n\n\n#########\n# Model #\n#########\n\n\n# We generate a MLP model with some arbitrary number of layers and tanh\n# activations.\n\n\n@dataclass(frozen=True)\nclass Linear(ArithmeticData):\n \"\"\"Linear layer.\"\"\"\n\n W: \"Weights array\"\n b: \"Biases vector\"\n\n def apply(self, input):\n \"\"\"Apply the layer.\"\"\"\n return input @ self.W + self.b\n\n\n@dataclass(frozen=True)\nclass Tanh(ArithmeticData):\n \"\"\"Tanh layer.\"\"\"\n\n def apply(self, input):\n \"\"\"Apply the layer.\"\"\"\n return numpy.tanh(input)\n\n\n@dataclass(frozen=True)\nclass Sequential(ArithmeticData):\n \"\"\"Sequential layer, applies all sub-layers in order.\"\"\"\n\n layers: \"Tuple of layers\"\n\n def apply(self, x):\n \"\"\"Apply the layer.\"\"\"\n for layer in self.layers:\n x = layer.apply(x)\n return x\n\n\n@dataclass(frozen=True)\nclass VAE(ArithmeticData):\n \"\"\"Sequential layer, applies all sub-layers in order.\"\"\"\n\n fc1: \"layer fc1\"\n fc21: \"layer fc21\"\n fc22: \"layer fc22\"\n fc3: \"layer fc3\"\n fc4: \"layer fc4\"\n\n def encode(self, x):\n h1 = pub.relu(self.fc1.apply(x))\n return self.fc21.apply(h1), self.fc22.apply(h1)\n\n def reparameterize(self, mu, logvar, rstate):\n std = array_exp(0.5 * logvar)\n eps, rstate = pub.uniform(rstate, (2, 20), -1.0, 1.0)\n return mu + eps * std, rstate\n\n def decode(self, z):\n h3 = pub.relu(self.fc3.apply(z))\n return pub.sigmoid(self.fc4.apply(h3))\n\n def forward(self, x, rstate):\n mu, logvar = self.encode(pub.reshape(x, (-1, 784)))\n z, rstate = self.reparameterize(mu, logvar, rstate)\n 
return self.decode(z), mu, logvar, rstate\n\n\nparams = (\n mlp_parameters(*(784, 400))[0],\n mlp_parameters(*(400, 20))[0],\n mlp_parameters(*(400, 20))[0],\n mlp_parameters(*(20, 400))[0],\n mlp_parameters(*(400, 784))[0],\n)\n\nmodel = VAE(\n Linear(params[0][0], params[0][1]),\n Linear(params[1][0], params[1][1]),\n Linear(params[2][0], params[2][1]),\n Linear(params[3][0], params[3][1]),\n Linear(params[4][0], params[4][1]),\n)\n\nmodel = to_device(model, backend, backend_options, broaden=False)\n\n\n# Reconstruction + KL divergence losses summed over all elements and batch\ndef loss_function(recon_x, x, mu, logvar):\n BCE = pub.binary_cross_entropy(\n recon_x, pub.reshape(x, (-1, 784)), reduction=\"sum\"\n )\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * pub._sum(1 + logvar - array_pow(mu, 2) - array_exp(logvar))\n\n return BCE + KLD\n\n\ndef cost(model, data, rstate):\n recon_batch, mu, logvar, _rstate = model.forward(data, rstate)\n loss = loss_function(recon_batch, data, mu, logvar)\n return loss.item(), _rstate\n\n\n@myia(backend=backend, backend_options=backend_options, return_backend=True)\ndef step(model, data, lr, rstate):\n \"\"\"Returns the loss and parameter gradients.\n\n value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.\n The 'model' argument can be omitted: by default the derivative wrt\n the first argument is returned.\n \"\"\"\n (_cost, rstate), dmodel = value_and_grad(cost, \"model\")(\n model, data, rstate, dout=(1, 1)\n )\n return _cost, model - lr * dmodel, rstate\n\n\n@myia(backend=backend, backend_options=backend_options, return_backend=True)\ndef step_eval(model, data, rstate):\n \"\"\"Returns the loss and parameter gradients.\n\n value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.\n The 'model' argument can be omitted: by default the derivative wrt\n the first argument is returned.\n \"\"\"\n return cost(model, data, rstate)\n\n\n@myia(backend=backend, backend_options=backend_options, return_backend=True)\ndef step_init_seed():\n \"\"\"Returns the loss and parameter gradients.\n\n value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.\n The 'model' argument can be omitted: by default the derivative wrt\n the first argument is returned.\n \"\"\"\n return random_initialize(1)\n\n\nlr = getattr(numpy, dtype)(0.01)\n\nif __name__ == \"__main__\":\n seed = 123\n cuda = False\n batch_size = 2\n epochs = 1\n\n torch.manual_seed(seed)\n\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"../data\",\n train=True,\n download=True,\n transform=transforms.ToTensor(),\n ),\n batch_size=batch_size,\n shuffle=True,\n **kwargs,\n )\n\n rand_state = step_init_seed()\n\n for _ in range(epochs):\n costs = []\n t0 = time.time()\n for i, (data, _) in enumerate(train_loader):\n print(\"i\", i + 1, \"/\", len(train_loader))\n _cost, model, rand_state = step(\n model, data.reshape((batch_size, 784)).numpy(), lr, rand_state\n )\n costs.append(_cost)\n costs = [float(c.from_device()) for c in costs]\n c = sum(costs) / len(costs)\n t = time.time() - t0\n print(f\"Cost: {c:15.10f}\\tTime: {t:15.10f}\")\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\"../data\", train=False, transform=transforms.ToTensor()),\n 
batch_size=batch_size,\n shuffle=True,\n **kwargs,\n )\n\n costs = []\n t0 = time.time()\n for i, (data, _) in enumerate(test_loader):\n _cost, rand_state = step_eval(\n model, data.reshape((batch_size, 784)).numpy(), rand_state\n )\n costs.append(_cost)\n costs = [float(c.from_device()) for c in costs]\n c = sum(costs) / len(costs)\n t = time.time() - t0\n print(f\"Cost: {c:15.10f}\\tTime: {t:15.10f}\")\n"
] | [
[
"numpy.random.RandomState",
"torch.manual_seed",
"torch.device",
"numpy.tanh"
]
] |
GlobalMaksimum/sadedegel | [
"8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b"
] | [
"sadedegel/bblock/vocabulary.py"
] | [
"import warnings\nfrom collections import defaultdict\nfrom os.path import dirname\nfrom pathlib import Path\n\nimport h5py\nimport numpy as np\nfrom cached_property import cached_property\nfrom rich.console import Console\n\nfrom .util import tr_lower, normalize_tokenizer_name\n\nconsole = Console()\n\n\nclass InvalidTokenizer(Exception):\n \"\"\"Invalid tokenizer name\"\"\"\n\n\ndef vocabulary_file(tokenizer: str, verify_exists=True):\n normalized_name = normalize_tokenizer_name(tokenizer)\n\n if normalized_name not in ['bert', 'icu', 'simple']:\n raise InvalidTokenizer(\n (f\"Currently only valid tokenizers are BERT, ICU Tokenizer for vocabulary generation.\"\n \" {normalized_name} found\"))\n\n vocab_file = Path(dirname(__file__)) / 'data' / normalized_name / 'vocabulary.hdf5'\n\n if not vocab_file.exists() and verify_exists:\n raise FileNotFoundError(f\"Vocabulary file for {tokenizer} ({normalized_name}) tokenizer not found.\")\n\n return vocab_file\n\n\nclass VocabularyCounter:\n def __init__(self, tokenizer, case_sensitive=True, min_tf=1, min_df=1):\n self.tokenizer = tokenizer\n\n self.doc_counter = defaultdict(set)\n self.doc_set = set()\n\n self.term_freq = defaultdict(int)\n\n self.min_tf = min_tf\n self.min_df = min_df\n self.case_sensitive = case_sensitive\n\n def inc(self, word: str, document_id: int, count: int = 1):\n if self.case_sensitive:\n w = word\n else:\n w = tr_lower(word)\n\n self.doc_counter[w].add(document_id)\n self.doc_set.add(document_id)\n self.term_freq[w] += count\n\n def add_word_to_doc(self, word: str, document_id: int):\n \"\"\"Implemented for backward compatibility\"\"\"\n\n self.inc(word, document_id, 1)\n\n @property\n def vocabulary_size(self):\n return len(self.term_freq)\n\n @property\n def document_count(self):\n return len(self.doc_set)\n\n def prune(self):\n\n to_remove = []\n\n for w in self.term_freq:\n if self.term_freq[w] < self.min_tf or len(self.doc_counter[w]) < self.min_df:\n to_remove.append(w)\n\n for w in to_remove:\n del self.doc_counter[w]\n del self.term_freq[w]\n\n console.log(\n f\"{len(to_remove)} terms (case sensitive={self.case_sensitive}) are pruned by tf (>= {self.min_tf}) or df filter(>= {self.min_df})\")\n\n return self\n\n def df(self, w: str):\n if self.case_sensitive:\n return len(self.doc_counter[w])\n else:\n return len(self.doc_counter[tr_lower(w)])\n\n def tf(self, w: str):\n if self.case_sensitive:\n return self.term_freq[w]\n else:\n return self.term_freq[tr_lower(w)]\n\n def to_hdf5(self, w2v=None):\n with h5py.File(vocabulary_file(self.tokenizer, verify_exists=False), \"a\") as fp:\n if self.case_sensitive:\n group = fp.create_group(\"form_\")\n else:\n group = fp.create_group(\"lower_\")\n\n words = sorted(list(self.term_freq.keys()), key=lambda w: tr_lower(w))\n\n group.attrs['size'] = len(words)\n group.attrs['document_count'] = len(self.doc_set)\n group.attrs['tokenizer'] = self.tokenizer\n group.attrs['min_tf'] = self.min_tf\n group.attrs['min_df'] = self.min_df\n\n if w2v is not None:\n group.attrs['vector_size'] = w2v.vector_size\n\n group.create_dataset(\"vector\", data=np.array(\n [w2v[w] if w in w2v else np.zeros(w2v.vector_size) for w in words]).astype(\n np.float32),\n compression=\"gzip\",\n compression_opts=9)\n group.create_dataset(\"has_vector\", data=np.array([w in w2v in w2v for w in words]),\n compression=\"gzip\",\n compression_opts=9)\n\n group.create_dataset(\"word\", data=words, compression=\"gzip\", compression_opts=9)\n group.create_dataset(\"df\", data=np.array([self.df(w) for w in 
words]), compression=\"gzip\",\n compression_opts=9)\n group.create_dataset(\"tf\", data=np.array([self.tf(w) for w in words]), compression=\"gzip\",\n compression_opts=9)\n\n console.print(f\"|D|: {self.document_count}, |V|: {self.vocabulary_size} (case sensitive={self.case_sensitive})\")\n\n\nclass Vocabulary:\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n self.file_name = vocabulary_file(tokenizer)\n self._df = None\n self._df_cs = None\n self._has_vector = None\n self._vector = None\n\n self.dword_cs = None\n self.dword = None\n\n @cached_property\n def size_cs(self) -> int:\n with h5py.File(self.file_name, \"r\") as fp:\n return fp['form_'].attrs['size']\n\n @cached_property\n def size(self) -> int:\n with h5py.File(self.file_name, \"r\") as fp:\n return fp['lower_'].attrs['size']\n\n def __len__(self):\n return self.size\n\n def id_cs(self, word: str, default: int = -1):\n if self.dword_cs is None:\n with h5py.File(self.file_name, \"r\") as fp:\n self.dword = dict((b.decode(\"utf-8\"), i) for i, b in enumerate(list(fp['lower_']['word'])))\n self.dword_cs = dict((b.decode(\"utf-8\"), i) for i, b in enumerate(list(fp['form_']['word'])))\n\n return self.dword_cs.get(word, default)\n\n def id(self, word: str, default: int = -1):\n if self.dword is None:\n with h5py.File(self.file_name, \"r\") as fp:\n self.dword = dict((b.decode(\"utf-8\"), i) for i, b in enumerate(list(fp['lower_']['word'])))\n self.dword_cs = dict((b.decode(\"utf-8\"), i) for i, b in enumerate(list(fp['form_']['word'])))\n\n return self.dword.get(tr_lower(word), default)\n\n def df(self, word: str):\n\n i = self.id(word)\n\n if i == -1:\n return 0\n else:\n if self._df is None:\n with h5py.File(self.file_name, \"r\") as fp:\n self._df = np.array(fp['lower_']['df'])\n\n return self._df[i]\n\n def df_cs(self, word: str):\n\n i = self.id_cs(word)\n\n if i == -1:\n return 0\n else:\n if self._df_cs is None:\n with h5py.File(self.file_name, \"r\") as fp:\n self._df_cs = np.array(fp['form_']['df'])\n\n return self._df_cs[i]\n\n def has_vector(self, word: str):\n with h5py.File(self.file_name, \"r\") as fp:\n if \"has_vector\" in fp['lower_']:\n i = self.id(word)\n\n if i == -1:\n return False\n else:\n if self._has_vector is None:\n self._has_vector = np.array(fp['lower_']['has_vector'])\n\n return self._has_vector[i]\n else:\n return False\n\n def vector(self, word: str):\n # TODO: Performance improvement required\n with h5py.File(self.file_name, \"r\") as fp:\n if \"vector\" in fp['lower_']:\n i = self.id(word)\n\n if i == -1:\n return False\n else:\n if self._vector is None:\n self._vector = np.array(fp['lower_']['vector'])\n\n return self._vector[i, :]\n else:\n return False\n\n @cached_property\n def document_count(self):\n with h5py.File(self.file_name, \"r\") as fp:\n return fp['form_'].attrs['document_count']\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
xujing1994/open_spiel | [
"7663a2717f16ff84c0d6a6bfdf19a9c21b37b765"
] | [
"open_spiel/python/examples/hearts_supervised_learning.py"
] | [
"# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Train a policy net on Hearts actions based given a dataset of trajectories.\n\nTrajectories from the Hearts bot Xinxin can be generated using\nopen_spiel/games/hearts/xinxin_game_generator.cc.\n\"\"\"\n\nimport os\nimport pickle\nfrom typing import Any, Tuple\n\nfrom absl import app\nfrom absl import flags\n\nimport haiku as hk\nimport jax\nfrom jax import numpy as jnp\nfrom jax.experimental import optix\nimport numpy as np\n\nimport pyspiel\n\nOptState = Any\nParams = Any\n\nFLAGS = flags.FLAGS\nGAME = pyspiel.load_game('hearts')\nNUM_CARDS = 52\nNUM_ACTIONS = NUM_CARDS\nNUM_PLAYERS = 4\nTOP_K_ACTIONS = 5 # How many alternative actions to display\nDEFAULT_LAYER_SIZES = [1024, 1024, 1024, 1024]\n\nflags.DEFINE_integer('iterations', 100000, 'Number of iterations')\nflags.DEFINE_string('data_path', None, 'Location for data')\nflags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')\nflags.DEFINE_integer('num_examples', 3,\n 'How many examples to print per evaluation')\nflags.DEFINE_integer('train_batch', 128, 'Batch size for training step')\nflags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')\nflags.DEFINE_float('step_size', 1e-4, 'Step size for training')\nflags.DEFINE_list('hidden_layer_sizes', None,\n 'Number of hidden units and layers in the network')\nflags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')\nflags.DEFINE_string('save_path', None, 'Location for saved networks')\nflags.DEFINE_string('checkpoint_file', None,\n 'Provides weights and optimzer state to resume training')\n\n\ndef _trajectory(line: str):\n \"\"\"Returns parsed action trajectory.\"\"\"\n actions = [int(x) for x in line.split(' ')]\n return tuple(actions)\n\n\ndef make_dataset(file: str):\n \"\"\"Creates dataset as a generator of single examples.\"\"\"\n lines = [line for line in open(file)]\n while True:\n np.random.shuffle(lines)\n for line in lines:\n trajectory = _trajectory(line)\n # skip pass_dir and deal actions\n action_index = np.random.randint(NUM_CARDS + 1, len(trajectory))\n state = GAME.new_initial_state()\n for action in trajectory[:action_index]:\n state.apply_action(action)\n yield (state.information_state_tensor(), trajectory[action_index])\n\n\ndef batch(dataset, batch_size: int):\n \"\"\"Creates a batched dataset from a one-at-a-time dataset.\"\"\"\n observations = np.zeros([batch_size] + GAME.information_state_tensor_shape(),\n np.float32)\n labels = np.zeros(batch_size, dtype=np.int32)\n while True:\n for batch_index in range(batch_size):\n observations[batch_index], labels[batch_index] = next(dataset)\n yield observations, labels\n\n\ndef one_hot(x, k):\n \"\"\"Returns a one-hot encoding of `x` of size `k`.\"\"\"\n return jnp.array(x[..., jnp.newaxis] == jnp.arange(k), dtype=np.float32)\n\n\ndef net_fn(x):\n \"\"\"Haiku module for our network.\"\"\"\n layers = []\n for layer_size in 
FLAGS.hidden_layer_sizes:\n layers.append(hk.Linear(int(layer_size)))\n layers.append(jax.nn.relu)\n layers.append(hk.Linear(NUM_ACTIONS))\n layers.append(jax.nn.log_softmax)\n net = hk.Sequential(layers)\n return net(x)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n if FLAGS.hidden_layer_sizes is None:\n # Cannot pass default arguments as lists due to style requirements, so we\n # override it here if they are not set.\n FLAGS.hidden_layer_sizes = DEFAULT_LAYER_SIZES\n\n # Make the network.\n net = hk.without_apply_rng(hk.transform(net_fn, apply_rng=True))\n\n # Make the optimiser.\n opt = optix.adam(FLAGS.step_size)\n\n @jax.jit\n def loss(\n params: Params,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> jnp.DeviceArray:\n \"\"\"Cross-entropy loss.\"\"\"\n assert targets.dtype == np.int32\n log_probs = net.apply(params, inputs)\n return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)\n\n @jax.jit\n def accuracy(\n params: Params,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> jnp.DeviceArray:\n \"\"\"Classification accuracy.\"\"\"\n predictions = net.apply(params, inputs)\n return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)\n\n @jax.jit\n def update(\n params: Params,\n opt_state: OptState,\n inputs: np.ndarray,\n targets: np.ndarray,\n ) -> Tuple[Params, OptState]:\n \"\"\"Learning rule (stochastic gradient descent).\"\"\"\n _, gradient = jax.value_and_grad(loss)(params, inputs, targets)\n updates, opt_state = opt.update(gradient, opt_state)\n new_params = optix.apply_updates(params, updates)\n return new_params, opt_state\n\n def output_samples(params: Params, max_samples: int):\n \"\"\"Output some cases where the policy disagrees with the dataset action.\"\"\"\n if max_samples == 0:\n return\n count = 0\n with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:\n lines = list(f)\n np.random.shuffle(lines)\n for line in lines:\n state = GAME.new_initial_state()\n actions = _trajectory(line)\n for action in actions:\n if not state.is_chance_node():\n observation = np.array(state.information_state_tensor(), np.float32)\n policy = np.exp(net.apply(params, observation))\n probs_actions = [(p, a) for a, p in enumerate(policy)]\n pred = max(probs_actions)[1]\n if pred != action:\n print(state)\n for p, a in reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):\n print('{:7} {:.2f}'.format(state.action_to_string(a), p))\n print('Ground truth {}\\n'.format(state.action_to_string(action)))\n count += 1\n break\n state.apply_action(action)\n if count >= max_samples:\n return\n\n # Store what we need to rebuild the Haiku net.\n if FLAGS.save_path:\n filename = os.path.join(FLAGS.save_path, 'layers.txt')\n with open(filename, 'w') as layer_def_file:\n for s in FLAGS.hidden_layer_sizes:\n layer_def_file.write(f'{s} ')\n layer_def_file.write('\\n')\n\n # Make datasets.\n if FLAGS.data_path is None:\n raise app.UsageError(\n 'Please generate your own supervised training data and supply the local'\n ' location as --data_path')\n train = batch(\n make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),\n FLAGS.train_batch)\n test = batch(\n make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)\n\n # Initialize network and optimiser.\n if FLAGS.checkpoint_file:\n with open(FLAGS.checkpoint_file, 'rb') as pkl_file:\n params, opt_state = pickle.load(pkl_file)\n else:\n rng = jax.random.PRNGKey(FLAGS.rng_seed) # seed used for network weights\n inputs, unused_targets = next(train)\n params = net.init(rng, 
inputs)\n opt_state = opt.init(params)\n\n # Train/eval loop.\n for step in range(FLAGS.iterations):\n # Do SGD on a batch of training examples.\n inputs, targets = next(train)\n params, opt_state = update(params, opt_state, inputs, targets)\n\n # Periodically evaluate classification accuracy on the test set.\n if (1 + step) % FLAGS.eval_every == 0:\n inputs, targets = next(test)\n test_accuracy = accuracy(params, inputs, targets)\n print(f'After {1+step} steps, test accuracy: {test_accuracy}.')\n if FLAGS.save_path:\n filename = os.path.join(FLAGS.save_path, f'checkpoint-{1 + step}.pkl')\n with open(filename, 'wb') as pkl_file:\n pickle.dump((params, opt_state), pkl_file)\n output_samples(params, FLAGS.num_examples)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"numpy.random.shuffle",
"numpy.zeros"
]
] |
efajardo-nv/cuml | [
"bc86714836284ed4752c267513e5d447e884e1c5"
] | [
"python/cuml/test/test_trustworthiness.py"
] | [
"# Copyright (c) 2018-2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom sklearn.manifold.t_sne import trustworthiness as sklearn_trustworthiness\nfrom cuml.metrics import trustworthiness as cuml_trustworthiness\n\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom umap import UMAP\n\nimport cudf\nimport numpy as np\n\n\[email protected]('input_type', ['ndarray'])\[email protected]('n_samples', [10, 100])\[email protected]('n_features', [10, 100])\[email protected]('n_components', [2, 8])\ndef test_trustworthiness(input_type, n_samples, n_features, n_components):\n centers = round(n_samples*0.4)\n X, y = make_blobs(n_samples=n_samples, centers=centers,\n n_features=n_features)\n\n X_embedded = \\\n UMAP(n_components=n_components).fit_transform(X)\n X = X.astype(np.float32)\n X_embedded = X_embedded.astype(np.float32)\n\n if input_type == 'dataframe':\n gdf = cudf.DataFrame()\n for i in range(X.shape[1]):\n gdf[str(i)] = np.asarray(X[:, i], dtype=np.float32)\n\n gdf_embedded = cudf.DataFrame()\n for i in range(X_embedded.shape[1]):\n gdf_embedded[str(i)] = np.asarray(X_embedded[:, i],\n dtype=np.float32)\n\n score = cuml_trustworthiness(gdf, gdf_embedded)\n else:\n score = cuml_trustworthiness(X, X_embedded)\n\n sk_score = sklearn_trustworthiness(X, X_embedded)\n\n eps = 0.001\n assert (sk_score * (1 - eps) <= score and\n score <= sk_score * (1 + eps))\n # assert cu_score == sk_score ideally\n"
] | [
[
"sklearn.manifold.t_sne.trustworthiness",
"numpy.asarray",
"sklearn.datasets.samples_generator.make_blobs"
]
] |
MikeXydas/MDSimsEval | [
"6c32bd8b74e421120beca18d18c3e58fc8f85247"
] | [
"MDSimsEval/pca_analysis.py"
] | [
"import math\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\n\n\ndef scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):\n \"\"\"\n Creates a plot with the scree plots for each ligand and saves it on the specified ``dir_path``. With blue color is\n class 1 and with orange color class 2.\n\n Args:\n analysis_actors_dict: ``{ \"Agonists\": List[AnalysisActor.class], \"Antagonists\": List[AnalysisActor.class] }``\n dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)\n pcs_on_scree_plot(int): The number of the first PCs that will be used on the scree plots\n variance_ratio_line(float): Float from 0.0 to 1.0 which specifies the variance ratio that a vertical line will\n be plotted\n\n \"\"\"\n # Get the dimensions of the final plot\n plot_cols = 3\n plot_rows = math.ceil(len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists']) / plot_cols)\n\n fig = plt.figure(figsize=(18, 6 * plot_rows))\n plot_index = 1\n\n # Agonists Iteration\n for which_ligand in analysis_actors_dict['Agonists']:\n ax = fig.add_subplot(plot_rows, plot_cols, plot_index)\n plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],\n ls='--', c='grey', label=f\"Reached {int(variance_ratio_line * 100)}% variance\")\n plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),\n which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label=\"Variance Ratio\")\n plt.ylabel(\"Variance\")\n plt.xlabel(\"#PC\")\n plt.title(which_ligand.drug_name)\n plt.legend()\n plot_index += 1\n\n # Antagonists Iteration\n for which_ligand in analysis_actors_dict['Antagonists']:\n ax = fig.add_subplot(plot_rows, plot_cols, plot_index)\n plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],\n ls='--', c='grey', label=f\"Reached {int(variance_ratio_line * 100)}% variance\")\n plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),\n which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label=\"Variance\", color='orange')\n plt.ylabel(\"Variance\")\n plt.xlabel(\"#PC\")\n plt.title(which_ligand.drug_name)\n plt.legend()\n plot_index += 1\n\n fig.suptitle('PCA Scree Plots\\nAgonists: Blue\\nAntagonists: Orange', fontsize=26, y=0.93)\n\n plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')\n\n\ndef populate_variance_showcase_df(analysis_actors_dict, drug_type):\n \"\"\"\n Creates a DataFrame having for each drug the number of PCs needed in order to have 50%, 75% and 95% variance\n\n Args:\n analysis_actors_dict: ``{ \"Agonists\": List[AnalysisActor.class], \"Antagonists\": List[AnalysisActor.class] }``\n drug_type (str): The class name ('Agonists' or 'Antagonists')\n\n Returns:\n pd.DataFrame: A DataFrame with columns ``['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance']``\n \"\"\"\n inp_df = pd.DataFrame(columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])\n for which_ligand in analysis_actors_dict[drug_type]:\n pca_var_row = pd.DataFrame([[\n which_ligand.drug_name,\n drug_type,\n np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.5)[0][0] + 1,\n # We +1 since the np.where will return\n np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.75)[0][0] + 1,\n # the 0 based index of the PC\n 
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.95)[0][0] + 1]\n ], columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])\n inp_df = inp_df.append(pca_var_row, ignore_index=True)\n\n return inp_df\n\n\ndef project_pca_on_2d(analysis_actors_dict, drug_type, dir_path):\n \"\"\"\n Plots the 2d projection on the first two PCs of the atom space. The colorbar expresses the progression\n of the frames (color0 -> frame0, color1 -> last_frame).\n The plot is shown inside the function but if needed it can easily be changed to return it.\n\n Args:\n analysis_actors_dict: ``{ \"Agonists\": List[AnalysisActor.class], \"Antagonists\": List[AnalysisActor.class] }``\n drug_type (str): 'Agonists' or 'Antagonists'\n dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)\n\n \"\"\"\n cols = 3\n rows = math.ceil(len(analysis_actors_dict[drug_type]) / cols)\n\n fig = plt.figure(figsize=(18, 25))\n plot_index = 1\n\n for which_ligand in tqdm(analysis_actors_dict[drug_type], desc=\"Projecting \" + drug_type):\n pca_space_2D = which_ligand.pca_res.transform(\n which_ligand.pca_xyz) # Transform on the atom selection that PCA was fitted\n step = 1 # Frames we are skipping for computational reasons (if step == 1 then no frame is skipped)\n\n # Scatter Plotting\n ax = fig.add_subplot(rows, cols, plot_index)\n plt.scatter(pca_space_2D[::step, 0], pca_space_2D[::step, 1],\n c=np.arange(len(pca_space_2D) / step) / (len(pca_space_2D) / step), marker='o')\n plt.xlabel('PC1')\n plt.ylabel('PC2')\n explained_variance_2PC = which_ligand.pca_res.explained_variance_ratio_[0] + \\\n which_ligand.pca_res.explained_variance_ratio_[1]\n plt.title(f'{which_ligand.drug_name} | Structural Motion Variance: {explained_variance_2PC}')\n plt.colorbar() # Add the colorbar which goes from color0 to color1 as frames progress\n plot_index += 1\n\n fig.suptitle(f'PCA 2D Projection of {drug_type} as frames progress', fontsize=26, y=1.03)\n plt.tight_layout()\n\n plt.savefig(f'{dir_path}pca_{drug_type}_2d_projection.png', format='png')\n\n return None\n\n\ndef sort_residues_by_loadings(ligand, variance_explained=0.5):\n \"\"\"\n Having as an input **a ligand** find the loadings of each residue and return them in descending order.\n The method combines first k PCs where k is defined by the variance_explained argument.\n\n Args:\n ligand(AnalysisActor.class): An AnalysisActor object in which PCA is calculated\n variance_explained (float): Defines which PCs will be combined to calculate the final loadings\n\n Returns:\n pd.DataFrame where ResidueId is the index and each row contains the loadings of the residue\n \"\"\"\n pca_res = ligand.get_pca()\n\n # How many pcs we need to cover variance_explained\n pcs_numb = np.where(np.cumsum(pca_res.explained_variance_ratio_) > variance_explained)[0][0] + 1\n\n # Calculate loadings using loadings = eigenvectors @ sqrt(eigenvalues)\n loadings = np.abs(pca_res.components_[:pcs_numb, :]).T @ np.sqrt(pca_res.explained_variance_[:pcs_numb])\n\n # Go from 3 * #residues columns to #residues columns, combining the 3 axes\n residue_loading = np.add.reduceat(loadings, range(0, len(loadings), 3))\n\n return pd.DataFrame(enumerate(residue_loading), columns=['ResidueId', ligand.drug_name]).set_index('ResidueId')\n\n\ndef loadings_heatmap(analysis_actors_dict, dir_path, explained_variance=0.75):\n \"\"\"\n | Creates a heatmap of the loadings of the residues for all the ligands. The blue line separates Class 1 from Class 2\n |\n\n .. 
figure:: ../_static/pca_loadings_heatmap.png\n :width: 550\n :align: center\n :height: 500px\n :alt: pca loadings heatmap missing\n\n PCA Loadings Heatmap, click for higher resolution.\n\n Args:\n analysis_actors_dict: ``{ \"Agonists\": List[AnalysisActor.class], \"Antagonists\": List[AnalysisActor.class] }``\n dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)\n explained_variance(float 0.0 - 1.0): Defines the number of PCs that will be used for the loadings calculation\n\n \"\"\"\n loadings_df = sort_residues_by_loadings(analysis_actors_dict['Agonists'][0], explained_variance)\n\n # Join all the loadings of each ligand\n for which_ligand in analysis_actors_dict['Agonists'][1:]:\n loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))\n for which_ligand in analysis_actors_dict['Antagonists']:\n loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))\n\n fig, ax = plt.subplots(figsize=(20, 15))\n\n sns.heatmap(loadings_df) # Seaborn heatmap of the loadings\n plt.axvline(len(analysis_actors_dict['Agonists'])) # Vertical line separating agonists from antagonists\n\n ax.axis('tight')\n ax.set(xticks=np.arange(len(loadings_df.columns)), xticklabels=loadings_df.columns,\n yticks=np.arange(0, len(loadings_df.index), 10), yticklabels=np.arange(0, len(loadings_df.index), 10))\n plt.xticks(rotation=45)\n\n plt.xlabel('Ligand', fontsize=18)\n plt.ylabel('Residue Id', fontsize=18)\n plt.title(f\"Heatmap of Loadings of each ligand | Explained Variance: {int(explained_variance * 100)}%\", fontsize=18)\n plt.tight_layout()\n\n plt.savefig(f'{dir_path}pca_loadings_heatmap.png', format='png')\n\n return None\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.cumsum",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel"
]
] |
sudohainguyen/MONAI | [
"a42b563acf0c7504cee18ee84c8af2eff6e948a7"
] | [
"tests/test_rand_spatial_crop_samples.py"
] | [
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom parameterized import parameterized\nfrom monai.transforms import RandSpatialCropSamples\n\nTEST_CASE_1 = [\n {\"roi_size\": [3, 3, 3], \"num_samples\": 4, \"random_center\": True},\n np.random.randint(0, 2, size=[3, 3, 3, 3]),\n (3, 3, 3, 3),\n]\n\nTEST_CASE_2 = [\n {\"roi_size\": [3, 3, 3], \"num_samples\": 8, \"random_center\": False},\n np.random.randint(0, 2, size=[3, 3, 3, 3]),\n (3, 3, 3, 3),\n]\n\n\nclass TestRandSpatialCropSamples(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_shape(self, input_param, input_data, expected_shape):\n result = RandSpatialCropSamples(**input_param)(input_data)\n for item in result:\n self.assertTupleEqual(item.shape, expected_shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.randint"
]
] |
pau557/dimod | [
"d3c6d3abf23182b035e1100c46f7c947202edefb"
] | [
"dimod/generators/chimera.py"
] | [
"# Copyright 2018 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# =============================================================================\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport numpy.random\n\nfrom dimod.binary_quadratic_model import BinaryQuadraticModel\nfrom dimod.decorators import graph_argument\nfrom dimod.vartypes import SPIN\n\n__all__ = ['chimera_anticluster']\n\n\n@graph_argument('subgraph', allow_None=True)\ndef chimera_anticluster(m, n=None, t=4, multiplier=3.0,\n cls=BinaryQuadraticModel, subgraph=None, seed=None):\n \"\"\"Generate an anticluster problem on a Chimera lattice.\n\n An anticluster problem has weak interactions within a tile and strong\n interactions between tiles.\n\n Args:\n m (int):\n Number of rows in the Chimera lattice.\n\n n (int, optional, default=m):\n Number of columns in the Chimera lattice.\n\n t (int, optional, default=t):\n Size of the shore within each Chimera tile.\n\n multiplier (number, optional, default=3.0):\n Strength of the intertile edges.\n\n cls (type, optional):\n Binary quadratic model class to build from. Default is\n :class:`.BinaryQuadraticModel`.\n\n subgraph (int/tuple[nodes, edges]/list[edge]/:obj:`~networkx.Graph`):\n A subgraph of a Chimera(m, n, t) graph to build the anticluster\n problem on.\n\n seed (int, optional, default=None):\n Random seed.\n\n Returns:\n :obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.\n\n \"\"\"\n if seed is None:\n seed = numpy.random.randint(2**32, dtype=np.uint32)\n r = numpy.random.RandomState(seed)\n\n m = int(m)\n if n is None:\n n = m\n else:\n n = int(n)\n t = int(t)\n\n ldata = np.zeros(m*n*t*2) # number of nodes\n\n if m and n and t:\n inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))\n\n if m > 1 or n > 1:\n outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))\n else:\n outrow = outcol = tuple()\n\n qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))\n\n qdata[len(inrow):] *= multiplier\n\n irow = inrow + outrow\n icol = incol + outcol\n\n else:\n irow = icol = qdata = tuple()\n\n bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)\n\n if subgraph is not None:\n nodes, edges = subgraph\n\n subbqm = cls.empty(SPIN)\n\n try:\n subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)\n\n except KeyError:\n msg = \"given 'subgraph' contains nodes not in Chimera({}, {}, {})\".format(m, n, t)\n raise ValueError(msg)\n\n try:\n subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)\n except KeyError:\n msg = \"given 'subgraph' contains edges not in Chimera({}, {}, {})\".format(m, n, t)\n raise ValueError(msg)\n\n bqm = subbqm\n\n return bqm\n\n\ndef _iter_chimera_tile_edges(m, n, t):\n hoff = 2 * t\n voff = n * hoff\n mi = m * voff\n ni = n * hoff\n\n # tile edges\n for edge in ((k0, k1)\n for i in range(0, ni, hoff)\n for j in range(i, mi, voff)\n for k0 in range(j, j + t)\n for k1 in range(j + t, j + 2 * t)):\n yield edge\n\n\ndef _iter_chimera_intertile_edges(m, n, 
t):\n hoff = 2 * t\n voff = n * hoff\n mi = m * voff\n ni = n * hoff\n\n # horizontal edges\n for edge in ((k, k + hoff)\n for i in range(t, 2 * t)\n for j in range(i, ni - hoff, hoff)\n for k in range(j, mi, voff)):\n yield edge\n\n # vertical edges\n for edge in ((k, k + voff)\n for i in range(t)\n for j in range(i, ni, hoff)\n for k in range(j, mi - voff, voff)):\n yield edge\n"
] | [
[
"numpy.zeros"
]
] |
chatto-hub-test2/Spaceboy2 | [
"7b6b91baf06290e6b047ae75e7ea61cee4846b3a"
] | [
"chatto_transform/datastores/sqlalchemy_datastore.py"
] | [
"import pandas\nfrom ..schema.schema_base import *\nfrom .datastore_base import DataStore\nfrom .odo_datastore import OdoDataStore\nfrom ..config import config\n\nfrom functools import lru_cache, partial\n\nfrom sqlalchemy import Table, MetaData, select\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.automap import automap_base\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.sql.expression import Select, and_\nfrom sqlalchemy import sql\n\nimport io\nimport tempfile\nimport time\nimport os\nimport datetime\nimport ciso8601\nimport odo\n\nmetadatas = {}\n\ndef get_engine_metadata(engine):\n if engine in metadatas:\n return metadatas[engine]\n else:\n metadata = MetaData()\n metadata.bind = engine\n metadatas[engine] = metadata\n return metadata\n\ndef get_reflected_metadata(engine, schema_name=None):\n metadata = MetaData()\n metadata.reflect(bind=engine, schema=schema_name)\n metadata.bind = engine\n return metadata\n\n########################################################################\n\nfor col_type in [dt, delta, num, bool_]:\n col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()\n\[email protected]_check('sqlalchemy')\ndef _(col):\n return col.dtype == 'object' \n\[email protected]_transform('sqlalchemy')\ndef _(col):\n return col.astype('object')\n\n@id_.register_check('sqlalchemy')\ndef _(col):\n return col.dtype == 'object'\n\n@id_.register_transform('sqlalchemy')\ndef _(col):\n return col.astype('object')\n\n########################################################################\n\[email protected]_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.Text, nullable=True)\n\n@id_.register_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.Integer, nullable=True)\n\[email protected]_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)\n\[email protected]_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.Interval, nullable=True)\n\n@big_dt.register_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)\n\[email protected]_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.Float, nullable=True)\n\n@bool_.register_metadata('sqlalchemy')\ndef _(self):\n return sql.schema.Column(self.name, sql.sqltypes.Boolean, nullable=True)\n\n########################################################################\n\n@lru_cache()\ndef schema_as_table(schema, engine):\n if schema.options.get('temporary', False):\n prefixes = ['TEMPORARY']\n else:\n prefixes = []\n\n db_schema = schema.options.get('db_schema', None)\n metadata = get_engine_metadata(engine)\n\n return Table(schema.name, metadata, *[col.metadata('sqlalchemy') for col in schema.cols], schema=db_schema, prefixes=prefixes)\n\nsa_type_2_col_type = {\n sql.sqltypes.Integer: num,\n sql.sqltypes.String: cat,\n sql.sqltypes.Date: dt,\n sql.sqltypes.DateTime: dt,\n sql.sqltypes.Interval: delta,\n sql.sqltypes.Numeric: num,\n sql.sqltypes.Boolean: bool_\n}\n\ndef table_as_schema(table):\n schema_cols = []\n for sa_col in table.c:\n for sa_type, col_type in sa_type_2_col_type.items():\n if isinstance(sa_col.type, sa_type):\n if isinstance(sa_col.type, sql.sqltypes.Integer) and (sa_col.primary_key or sa_col.foreign_keys):\n 
schema_cols.append(id_(sa_col.name))\n else:\n schema_cols.append(col_type(sa_col.name))\n break\n options = {}\n if table.schema is not None:\n options['db_schema'] = table.schema\n s = Schema(table.name, schema_cols, options=options)\n return s\n\n########################################################################\n\ndef fast_sql_to_df(table, schema):\n engine = table.bind\n\n if engine.dialect.name == 'mysql':\n return fast_mysql_to_df(table, schema)\n elif engine.dialect.name == 'postgresql':\n return fast_postgresql_to_df(table, schema)\n\n ods = OdoDataStore(schema, table)\n df = ods.load()\n df = df[schema.col_names()]\n return df\n\ndef fast_mysql_to_df(table, schema):\n f = tempfile.NamedTemporaryFile('w', suffix='.csv', dir=config.data_dir+'tmp')\n try:\n f.close()\n table_name = str(table)\n if not isinstance(table, Table):\n table_name = '({})'.format(table_name)\n\n # converting to csv\n sql = \"\"\"SELECT {cols} FROM {table} INTO OUTFILE '{filename}'\n FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"'\n ESCAPED BY '\\\\\\\\'\n LINES TERMINATED BY '\\n'\"\"\".format(\n cols=', '.join('`'+colname+'`' for colname in schema.col_names()),\n filename=f.name,\n table=table_name)\n\n table.bind.execute(sql)\n \n # reading csv\n df = pandas.read_csv(f.name, header=None, names=schema.col_names(), na_values=['\\\\N'])\n finally:\n os.remove(f.name)\n\n for col in schema.cols:\n if isinstance(col, dt):\n # converting datetime column (errors='coerce' is the modern pandas spelling of the removed coerce=True)\n df[col.name] = pandas.to_datetime(df[col.name], format=\"%Y-%m-%d %H:%M:%S\", errors='coerce')\n if isinstance(col, big_dt):\n # converting big_dt column\n strptime = datetime.datetime.strptime\n parse_func = (lambda x: strptime(x, \"%Y-%m-%d %H:%M:%S\"))\n df[col.name] = df[col.name].map(parse_func, na_action='ignore')\n return df\n\ndef fast_postgresql_to_df(table, schema):\n engine = table.bind\n conn = engine.raw_connection()\n with conn.cursor() as cur:\n with io.StringIO() as f:\n table_name = str(table)\n if not isinstance(table, Table):\n table_name = '({})'.format(table_name)\n sql = \"COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)\".format(\n table_name=table_name)\n cur.copy_expert(sql, f)\n\n f.seek(0)\n df = pandas.read_csv(f)\n for col in schema.cols:\n if isinstance(col, dt):\n # converting datetime column (errors='coerce' is the modern pandas spelling of the removed coerce=True)\n df[col.name] = pandas.to_datetime(df[col.name], format=\"%Y-%m-%d %H:%M:%S\", errors='coerce')\n if isinstance(col, big_dt):\n # converting big_dt column\n strptime = datetime.datetime.strptime\n parse_func = (lambda x: strptime(x, \"%Y-%m-%d %H:%M:%S\"))\n df[col.name] = df[col.name].map(parse_func, na_action='ignore')\n return df\n\ndef fast_postgresql_to_csv(table, file_path):\n engine = table.bind\n conn = engine.raw_connection()\n with conn.cursor() as cur:\n with open(file_path, 'w') as f:\n table_name = str(table)\n if not isinstance(table, Table):\n table_name = '({})'.format(table_name)\n sql = \"COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)\".format(\n table_name=table_name)\n cur.copy_expert(sql, f)\n\ndef fast_df_to_sql(df, table, schema):\n ods = OdoDataStore(schema, table, storage_target_type='sqlalchemy')\n ods.store(df)\n\nclass SATableDataStore(DataStore):\n def __init__(self, schema, engine, where_clauses=None):\n super().__init__(schema)\n self.engine = engine \n self.table = schema_as_table(self.schema, self.engine)\n self.where_clauses = where_clauses\n\n def storage_target(self):\n return 'sqlalchemy'\n\n def _load(self):\n query = self.table\n if self.where_clauses is not None:\n query = 
query.select()\n for where_clause in self.where_clauses:\n query = query.where(where_clause)\n\n df = fast_sql_to_df(query, self.schema)\n return df\n\n def to_csv(self, file_path):\n if self.engine.dialect.name != 'postgresql':\n raise NotImplementedError('converting directly to csv not supported for non-postgres databases')\n query = self.table\n if self.where_clauses is not None:\n query = query.select()\n for where_clause in self.where_clauses:\n query = query.where(where_clause)\n\n fast_postgresql_to_csv(query, file_path)\n\n def _store(self, df):\n if self.where_clauses is not None:\n raise NotImplementedError('Cannot store to a query (where_clauses must be left blank)')\n df = df.copy()\n fast_df_to_sql(df, self.table, self.schema)\n\n def _update(self, df):\n if self.where_clauses is not None:\n raise NotImplementedError('Cannot update to a query (where_clauses must be left blank)')\n df = df.copy()\n\n with self.engine.connect() as conn:\n temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)\n temp_schema.options['temporary'] = True\n temp_table = schema_as_table(temp_schema, self.engine)\n\n print('storing new df in temp table')\n fast_df_to_sql(df, temp_table, temp_schema)\n\n print('updating table from matching rows')\n index = self.schema.options['index']\n update = self.table.update(\n values={\n col_name: temp_table.c[col_name] for col_name in self.schema.col_names()\n },\n whereclause=self.table.c[index] == temp_table.c[index]\n )\n update_res = conn.execute(update)\n\n print('inserting new rows into table')\n exists_query = self.table.select().where(self.table.c[index] == temp_table.c[index]).exists()\n\n insert = self.table.insert().from_select(\n temp_schema.col_names(),\n temp_table.select().where(~exists_query))\n ins_res = conn.execute(insert)\n\n def delete(self):\n if self.where_clauses is not None:\n raise NotImplementedError('Cannot delete a query (where_clauses must be left blank)')\n \n self.table.drop(self.engine)\n\n\nclass SAJoinDataStore(DataStore):\n def __init__(self, root_schema, engine, has_schemas=None, belongs_to_schemas=None, root_conditions=None, where_clauses=None):\n self.engine = engine\n self.root_schema = root_schema\n self.root_table = schema_as_table(self.root_schema, self.engine)\n \n self.has_schemas, self.has_join_conditions = self._parse_schema_list(has_schemas)\n self.has_tables = [schema_as_table(h_schema, self.engine) for h_schema in self.has_schemas]\n\n self.belongs_to_schemas, self.belongs_to_join_conditions = self._parse_schema_list(belongs_to_schemas)\n self.belongs_to_tables = [schema_as_table(b_schema, self.engine) for b_schema in self.belongs_to_schemas]\n\n self.root_conditions = root_conditions\n self.where_clauses = where_clauses\n\n schema = Schema.union([self.root_schema] + self.has_schemas + self.belongs_to_schemas, with_prefix=True, schema_name=self.root_schema.name+'_join')\n super().__init__(schema)\n\n def _parse_schema_list(self, schema_list=None):\n if schema_list is None:\n schema_list = []\n schemas = []\n join_conditions = {}\n for schema in schema_list:\n if isinstance(schema, tuple):\n schema, j_c = schema\n join_conditions[schema] = j_c\n schemas.append(schema)\n return schemas, join_conditions\n\n def storage_target(self):\n return 'sqlalchemy'\n\n def _load(self):\n root = self.root_table\n if self.root_conditions is not None:\n root = root.select().where(and_(*self.root_conditions)).alias()\n join_clause = root\n\n select_clause = []\n root_col_prefix = self.root_schema.options['prefix']\n for col 
in root.c:\n select_clause.append(col.label(\"{}.{}\".format(root_col_prefix, col.name)))\n\n for h_table, h_schema in zip(self.has_tables, self.has_schemas):\n col_prefix = h_schema.options['prefix']\n h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]\n for join_condition in self.has_join_conditions.get(h_schema, []):\n h_join_conditions.append(join_condition)\n join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))\n \n for col in h_table.c:\n select_clause.append(col.label(\"{}.{}\".format(col_prefix, col.name)))\n\n for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):\n col_prefix = b_schema.options['prefix']\n \n b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]\n for join_condition in self.belongs_to_join_conditions.get(b_schema, []):\n b_join_conditions.append(join_condition)\n join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))\n \n for col in b_table.c:\n select_clause.append(col.label(\"{}.{}\".format(col_prefix, col.name)))\n\n temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)\n temp_table = schema_as_table(temp_schema, self.engine) \n try:\n temp_table.create(self.engine)\n\n query = select(select_clause).select_from(join_clause)\n if self.where_clauses is not None:\n query = query.where(and_(*self.where_clauses))\n\n insert = temp_table.insert().from_select(temp_schema.col_names(), query)\n\n start = time.time()\n \n print('executing join into temp table')\n self.engine.execute(insert)\n joined = time.time()\n\n print('loading rows from temp table')\n df = fast_sql_to_df(temp_table, temp_schema)\n loaded = time.time()\n finally:\n temp_table.drop(self.engine)\n\n print('type checking and sorting')\n \n print('took', joined - start, 'seconds to perform the join')\n print('took', loaded - joined, 'seconds to load the results')\n \n return df\n\nclass SAQueryDataStore(DataStore):\n def __init__(self, schema, engine, query):\n self.engine = engine\n self.query = query\n self.schema = schema\n\n def _load(self):\n df = pandas.read_sql(self.query, self.engine)\n \n return df\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.read_sql"
]
] |
ml-evs/ilustrado | [
"3121ecaff9cb517f3946b2283bf50dce499caad9"
] | [
"ilustrado/util.py"
] | [
"# coding: utf-8\n\n\"\"\" Catch-all file for utility functions.\n\n\"\"\"\n\nimport sys\nimport logging\n\nimport numpy as np\nfrom matador.compute import ComputeTask\nfrom matador.utils.cell_utils import cart2frac, cart2abc\n\nLOG = logging.getLogger(\"ilustrado\")\nLOG.setLevel(logging.DEBUG)\n\n\ndef strip_useless(doc, to_run=False):\n \"\"\" Strip useless information from a matador doc.\n\n Parameters:\n doc (dict): structure to strip information from.\n\n Arguments:\n to_run (bool): whether the structure needs to be rerun,\n i.e. whether to delete data from previous run.\n\n Returns:\n dict: matador document stripped of useless keys\n\n \"\"\"\n stripped_doc = dict()\n if to_run:\n keys = [\n \"source\",\n \"parents\",\n \"mutations\",\n \"elems\",\n \"stoichiometry\",\n \"lattice_abc\",\n \"lattice_cart\",\n \"positions_frac\",\n \"num_atoms\",\n \"atom_types\",\n ]\n else:\n keys = [\n \"source\",\n \"parents\",\n \"mutations\",\n \"elems\",\n \"stoichiometry\",\n \"lattice_abc\",\n \"lattice_cart\",\n \"cell_volume\",\n \"space_group\",\n \"positions_frac\",\n \"num_atoms\",\n \"atom_types\",\n \"enthalpy\",\n \"enthalpy_per_atom\",\n \"total_energy\",\n \"total_energy_per_atom\",\n \"pressure\",\n \"max_force_on_atom\",\n \"optimised\",\n \"date\",\n \"total_time_hrs\",\n \"peak_mem_MB\",\n ]\n\n for key in keys:\n if key in doc:\n stripped_doc[key] = doc[key]\n if isinstance(doc[key], np.ndarray):\n stripped_doc[key] = doc[key].tolist()\n return stripped_doc\n\n\nclass FakeComputeTask(ComputeTask):\n \"\"\" Fake Relaxer for testing, with same parameters as the real one\n from matador.compute.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.structure = kwargs[\"res\"]\n self.output_queue = kwargs[\"output_queue\"]\n\n def relax(self):\n fake_number_crunch = True\n if fake_number_crunch:\n size = np.random.randint(low=3, high=50)\n array = np.random.rand(size, size)\n np.linalg.eig(array)\n self.structure[\"enthalpy_per_atom\"] = -505 + np.random.rand()\n self.structure[\"enthalpy\"] = self.structure[\"enthalpy_per_atom\"] * self.structure[\"num_atoms\"]\n if np.random.rand() < 0.8:\n self.structure[\"optimised\"] = True\n else:\n self.structure[\"optimised\"] = False\n self.output_queue.put(self.structure)\n\n\nclass NewbornProcess:\n \"\"\" Simple container of process data. \"\"\"\n\n def __init__(self, newborn_id, node, process, ncores=None):\n self.newborn_id = newborn_id\n self.node = node\n self.process = process\n self.ncores = ncores\n\n\nclass AseRelaxation:\n \"\"\" Perform a variable cell relaxation with ASE,\n using a predefined calculator.\n\n \"\"\"\n def __init__(self, doc, queue, calculator=None):\n \"\"\" Initialise a relaxation with ASE.\n\n Parameters:\n doc (dict): the structure to optimise.\n queue (mp.Queue): the queue to push the result to.\n\n Keyword arguments:\n calculator (ase.Calculator): the calculator object\n to use for force/energy computation. 
Default is\n LennardJones.\n\n \"\"\"\n from copy import deepcopy\n from matador.utils.viz_utils import doc2ase\n from ase.constraints import UnitCellFilter\n\n if calculator is None:\n from ase.calculators.lj import LennardJones\n self.calc = LennardJones()\n else:\n self.calc = calculator\n\n self.doc = deepcopy(doc)\n self.atoms = doc2ase(doc)\n self.atoms.set_calculator(self.calc)\n self.ucf = UnitCellFilter(self.atoms)\n self.queue = queue\n\n def relax(self):\n from ase.optimize import LBFGS\n\n cached = sys.__stdout__\n try:\n optimizer = LBFGS(self.ucf)\n optimizer.logfile = None\n optimised = optimizer.run(fmax=0.05, steps=100)\n except Exception:\n optimised = False\n\n self.doc[\"optimised\"] = bool(optimised)\n self.doc[\"positions_abs\"] = self.atoms.get_positions().tolist()\n self.doc[\"lattice_cart\"] = self.atoms.get_cell().tolist()\n self.doc[\"lattice_abc\"] = cart2abc(self.doc[\"lattice_cart\"])\n self.doc[\"positions_frac\"] = cart2frac(self.doc[\"lattice_cart\"], self.doc[\"positions_abs\"])\n self.doc[\"enthalpy_per_atom\"] = float(self.calc.results[\"energy\"] / len(\n self.doc[\"atom_types\"]\n ))\n self.doc[\"enthalpy\"] = float(self.calc.results[\"energy\"])\n self.queue.put(self.doc)\n sys.stdout = cached\n"
] | [
[
"numpy.random.randint",
"numpy.random.rand",
"numpy.linalg.eig"
]
] |
Criscraft/pytorch_classification | [
"d5772963e55ce218ae4719fb7f85604263aab65f"
] | [
"pytorchtools/ptnetworks/ResNetCIFAR.py"
] | [
"from collections import OrderedDict \nimport torch\nfrom torch import Tensor\nimport torch.nn as nn\nfrom torch.utils.model_zoo import load_url as load_state_dict_from_url\nfrom ptnetworks.ActivationTracker import ActivationTracker\nfrom typing import Type, Any, Callable, Union, List, Optional\n\n\nclass ResNetCIFAR(nn.Module):\n def __init__(self,\n variant='resnet050', \n n_classes=100, \n pretrained=False, \n freeze_features_until='', #exclusive\n no_gradient_required=False,\n enforce_batchnorm_requires_gradient=False,\n n_layers_to_be_removed_from_blocks=[],\n no_classifier=False,\n activation='relu',\n init_mode='kaiming_normal',\n statedict='',\n strict_loading=True):\n super().__init__()\n\n arg_dict = {\n 'pretrained' : pretrained,\n 'num_classes' : n_classes,\n 'init_mode' : init_mode,\n 'activation' : activation,\n }\n\n if variant == 'resnet018':\n self.embedded_model = resnet18(**arg_dict)\n elif variant == 'resnet034':\n self.embedded_model = resnet34(**arg_dict)\n elif variant == 'resnet050':\n self.embedded_model = resnet50(**arg_dict)\n elif variant == 'resnet101':\n self.embedded_model = resnet101(**arg_dict)\n elif variant == 'resnet152':\n self.embedded_model = resnet152(**arg_dict)\n elif variant == 'resnext050_32x4d':\n self.embedded_model = resnext50_32x4d(**arg_dict)\n elif variant == 'resnext101_32x8d':\n self.embedded_model = resnext101_32x8d(**arg_dict)\n elif variant == 'wide_resnet050_2':\n self.embedded_model = wide_resnet50_2(**arg_dict)\n elif variant == 'wide_resnet101_2':\n self.embedded_model = wide_resnet101_2(**arg_dict)\n else:\n print('select valid model variant')\n\n if no_classifier:\n self.embedded_model.classifier = nn.Identity()\n\n module_dict = OrderedDict([\n ('classifier', self.embedded_model.classifier),\n ('layer4', self.embedded_model.layer4),\n ('layer3', self.embedded_model.layer3),\n ('layer2', self.embedded_model.layer2),\n ('layer1', self.embedded_model.layer1),\n ])\n \n if freeze_features_until:\n for param in self.embedded_model.parameters():\n param.requires_grad = False\n \n if freeze_features_until not in module_dict:\n raise ValueError(\"freeue_features_until does not match any network module\")\n \n for key, module in module_dict.items():\n for param in module.parameters():\n param.requires_grad = True\n if freeze_features_until == key:\n break\n\n if n_layers_to_be_removed_from_blocks:\n modules = [\n self.embedded_model.layer1,\n self.embedded_model.layer2,\n self.embedded_model.layer3,\n self.embedded_model.layer4,\n ]\n for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):\n for i in range(n_layers):\n layer[-i-1] = nn.Identity()\n\n if statedict:\n pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))\n missing = self.load_state_dict(pretrained_dict, strict=strict_loading)\n print('Loading weights from statedict. 
Missing and unexpected keys:')\n print(missing)\n\n if enforce_batchnorm_requires_gradient:\n for m in self.embedded_model.modules():\n if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n for param in m.parameters():\n param.requires_grad = True\n\n if no_gradient_required:\n for param in self.embedded_model.parameters():\n param.requires_grad = False\n \n def forward(self, batch):\n if isinstance(batch, dict) and 'data' in batch:\n logits = self.embedded_model(batch['data'])\n out = {'logits' : logits}\n return out\n else:\n return self.embedded_model(batch)\n\n def forward_features(self, batch, module=None):\n track_modules = ActivationTracker()\n\n assert isinstance(batch, dict) and 'data' in batch\n logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)\n out = {'logits' : logits, 'activations' : activation_dict}\n return out\n \n def save(self, statedict_name):\n torch.save(self.state_dict(), statedict_name)\n\n \nMODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n\ndef conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion: int = 1\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n activation_layer=nn.ReLU\n ) -> None:\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu_1 = activation_layer(inplace=False)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.relu_2 = activation_layer(inplace=False)\n self.stride = stride\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu_1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = 
self.downsample(x)\n\n out += identity\n out = self.relu_2(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion: int = 4\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n activation_layer=nn.ReLU\n ) -> None:\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu_1 = activation_layer(inplace=False)\n self.relu_2 = activation_layer(inplace=False)\n self.relu_3 = activation_layer(inplace=False)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu_1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu_2(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu_3(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(\n self,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n num_classes: int = 1000,\n zero_init_residual: bool = False,\n groups: int = 1,\n width_per_group: int = 64,\n replace_stride_with_dilation: Optional[List[bool]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n init_mode='kaiming_normal',\n activation='relu',\n ) -> None:\n super().__init__()\n\n self.ID = 'ResNet'\n\n if activation == 'relu':\n activation_layer = nn.ReLU\n elif activation == 'leaky_relu':\n activation_layer = nn.LeakyReLU\n self._activation_layer = activation_layer\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n #for CIFAR we choose a kernel size of 3 in the first convolutional layer\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,\n bias=False)\n self.conv1.ID = self.ID + '_first_layer'\n self.bn1 = norm_layer(self.inplanes)\n self.relu = self._activation_layer(inplace=False)\n #we do not apply maxpooling after the first layer for CIFAR\n self.maxpool = 
nn.Identity() #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(512 * block.expansion, num_classes)\n\n self.reinitialize(init_mode, activation, zero_init_residual)\n\n\n def reinitialize(self, init_mode, activation, zero_init_residual):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if init_mode == 'kaiming_normal':\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)\n elif init_mode == 'kaiming_uniform':\n nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)\n elif init_mode == 'sparse':\n nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)\n elif init_mode == 'orthogonal':\n nn.init.orthogonal_(m.weight, gain=1)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]\n\n\n def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,\n stride: int = 1, dilate: bool = False) -> nn.Sequential:\n norm_layer = self._norm_layer\n downsample = None\n activation_layer = self._activation_layer\n previous_dilation = self.dilation\n\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer, activation_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer, activation_layer=activation_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\ndef _resnet(\n arch: str,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n pretrained: bool,\n progress: bool,\n **kwargs: Any\n) -> ResNet:\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress, model_dir=MODEL_DIR)\n model.load_state_dict(state_dict, strict=False)\n return model\n\n\ndef 
resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef wide_resnet50_2(pretrained: bool 
= False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.init.sparse_",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.Conv2d",
"torch.nn.Identity",
"torch.nn.Sequential",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_uniform_",
"torch.device",
"torch.nn.init.orthogonal_"
]
] |
pierg/wiseml-patterns | [
"2decf2954001296bd04261b00ae144f53359a2b8"
] | [
"gym_minigrid/extendedminigrid.py"
] | [
"from gym_minigrid.minigrid import *\nfrom configurations import config_grabber as cg\n\nimport math\nimport operator\nfrom functools import reduce\n\nimport traceback\n\nimport numpy as np\n\nconfig = cg.Configuration.grab()\n\nAGENT_VIEW_SIZE = config.agent_view_size\nEXTRA_OBSERVATIONS_SIZE = 5\nOBS_ARRAY_SIZE = (AGENT_VIEW_SIZE, AGENT_VIEW_SIZE)\n\ndef extended_dic(obj_names=[]):\n \"\"\"\n Extend the OBJECT_TO_IDX dictionaries with additional objects\n :param obj_names: list of strings\n :return: OBJECT_TO_IDX extended\n \"\"\"\n biggest_idx = list(OBJECT_TO_IDX.values())[-1]\n for key in OBJECT_TO_IDX.values():\n if key > biggest_idx:\n biggest_idx = key\n new_obj_idx = biggest_idx + 1\n for obj_name in obj_names:\n if not obj_name in OBJECT_TO_IDX.keys():\n OBJECT_TO_IDX.update({obj_name: new_obj_idx})\n new_obj_idx = new_obj_idx + 1\n\n\nextended_dic([\"water\", \"lightsw\", \"dirt\", \"vase\"])\nIDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))\n\n\nclass Room:\n\n def __init__(self, room, size, position, lightOn):\n self.number = room\n self.size = size\n self.position = position\n self.lightOn = lightOn\n\n def setLight(self, lightOn):\n self.lightOn = lightOn\n\n def setEntryDoor(self, position):\n self.entryDoor = position\n\n def setExitDoor(self, position):\n self.exitDoor = position\n\n def getLight(self):\n return self.lightOn\n\n def objectInRoom(self, position):\n ax, ay = position\n x, y = self.size\n k, l = self.position\n x += k\n y += l\n if ax <= x and ax >= k:\n if ay <= y and ay >= l:\n return True\n return False\n\n\nclass Water(WorldObj):\n def __init__(self):\n super(Water, self).__init__('water', 'blue')\n\n def can_overlap(self):\n return True\n\n def render(self, r):\n self._set_color(r)\n r.drawPolygon([\n (0, CELL_PIXELS),\n (CELL_PIXELS, CELL_PIXELS),\n (CELL_PIXELS, 0),\n (0, 0)\n ])\n\n\nclass LightSwitch(WorldObj):\n def __init__(self):\n self.is_on = False\n super(LightSwitch, self).__init__('lightsw', 'yellow')\n\n def affectRoom(self, room):\n self.room = room\n\n def setSwitchPos(self, position):\n self.position = position\n\n def elements_in_room(self, room):\n self.elements = room\n\n def toggle(self, env, pos):\n self.room.setLight(not self.room.getLight())\n self.is_on = not self.is_on\n return True\n\n def getRoomNumber(self):\n return self.room.number\n\n def can_overlap(self):\n return False\n\n def render(self, r):\n self._set_color(r)\n r.drawPolygon([\n (0, CELL_PIXELS),\n (CELL_PIXELS, CELL_PIXELS),\n (CELL_PIXELS, 0),\n (0, 0)\n ])\n self.dark_light(r)\n\n def dark_light(self, r):\n\n if self.room.getLight() == False:\n r.setColor(255, 0, 0)\n r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)\n if hasattr(self, 'cur_pos'):\n if hasattr(self, 'elements'):\n (xl, yl) = self.cur_pos\n for i in range(0, len(self.elements)):\n if self.elements[i][2] == 1:\n r.setLineColor(10, 10, 10)\n r.setColor(10, 10, 10)\n r.drawPolygon([\n (\n (self.elements[i][0] - xl) * CELL_PIXELS,\n (self.elements[i][1] - yl + 1) * CELL_PIXELS),\n ((self.elements[i][0] - xl + 1) * CELL_PIXELS,\n (self.elements[i][1] - yl + 1) * CELL_PIXELS),\n (\n (self.elements[i][0] - xl + 1) * CELL_PIXELS,\n (self.elements[i][1] - yl) * CELL_PIXELS),\n ((self.elements[i][0] - xl) * CELL_PIXELS, (self.elements[i][1] - yl) * CELL_PIXELS)\n ])\n else:\n r.setColor(0, 255, 0)\n r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)\n r.pop\n\n\nclass Dirt(WorldObj):\n def __init__(self):\n super(Dirt, self).__init__('dirt', 
'yellow')\n\n def can_overlap(self):\n return True\n\n def affect_list(self, list):\n self.list = list\n\n def toggle(self, env, pos):\n x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))\n env.grid.set(x, y, None)\n del self.list[len(self.list) - 1]\n return True\n\n def render(self, r):\n self._set_color(r)\n r.setColor(240, 150, 0)\n r.setLineColor(81, 41, 0)\n r.drawPolygon([\n (0, CELL_PIXELS),\n (CELL_PIXELS, CELL_PIXELS),\n (CELL_PIXELS, 0),\n (0, 0)\n ])\n\n\nclass Vase(WorldObj):\n def __init__(self):\n super(Vase, self).__init__('vase', 'grey')\n self.content = Dirt()\n self.list = []\n\n def can_overlap(self):\n return False\n\n def toggle(self, env, pos):\n x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))\n env.grid.set(x, y, self.content)\n self.list.append(Dirt())\n self.content.affect_list(self.list)\n\n def render(self, r):\n self._set_color(r)\n r.setColor(255, 255, 255)\n QUARTER_CELL = 0.25 * CELL_PIXELS\n DEMI_CELL = 0.5 * CELL_PIXELS\n r.drawCircle(DEMI_CELL, DEMI_CELL, DEMI_CELL)\n r.drawPolygon([\n (QUARTER_CELL, 3 * QUARTER_CELL),\n (3 * QUARTER_CELL, 3 * QUARTER_CELL),\n (3 * QUARTER_CELL, QUARTER_CELL),\n (QUARTER_CELL, QUARTER_CELL)\n ])\n r.setColor(240, 150, 0)\n r.drawPolygon([\n (0.32 * CELL_PIXELS, 0.7 * CELL_PIXELS),\n (0.7 * CELL_PIXELS, 0.7 * CELL_PIXELS),\n (0.7 * CELL_PIXELS, 0.32 * CELL_PIXELS),\n (0.32 * CELL_PIXELS, 0.32 * CELL_PIXELS)\n ])\n\n def list_dirt(self, list):\n self.list = list\n\n\ndef worldobj_name_to_object(worldobj_name):\n if worldobj_name == 'water':\n return Water()\n elif worldobj_name == 'wall':\n return Wall()\n elif worldobj_name == \"lightsw\":\n return LightSwitch()\n elif worldobj_name == \"dirt\":\n return Dirt()\n elif worldobj_name == \"vase\":\n return Vase()\n elif worldobj_name == \"goal\":\n return Goal()\n else:\n return None\n\n\nclass ExGrid(Grid):\n \"\"\"\n Extending Grid methods to support the new objects\n \"\"\"\n\n # Add new worldobje that need to be decoded (Ex. 
water)\n def decode(array):\n \"\"\"\n Decode an array grid encoding back into a grid\n \"\"\"\n flatten_dim = array.shape[0]\n width = int(math.sqrt(flatten_dim))\n height = width\n # width = array.shape[0]\n # height = array.shape[1]\n grid = ExGrid(width, height)\n\n for j in range(0, height):\n for i in range(0, width):\n\n typeIdx = array[i, j, 0]\n colorIdx = array[i, j, 1]\n openIdx = array[i, j, 2]\n\n if typeIdx == 0:\n continue\n\n objType = IDX_TO_OBJECT[typeIdx]\n color = IDX_TO_COLOR[colorIdx]\n is_open = True if openIdx == 1 else 0\n\n if objType == 'wall':\n v = Wall(color)\n elif objType == 'ball':\n v = Ball(color)\n elif objType == 'key':\n v = Key(color)\n elif objType == 'box':\n v = Box(color)\n elif objType == 'door':\n v = Door(color, is_open)\n elif objType == 'locked_door':\n v = LockedDoor(color, is_open)\n elif objType == 'goal':\n v = Goal()\n elif objType == 'water':\n v = Water()\n elif objType == 'lightsw':\n v = LightSwitch()\n elif objType == 'dirt':\n v = Dirt()\n elif objType == 'vase':\n v = Vase()\n else:\n assert False, \"unknown obj type in decode '%s'\" % objType\n grid.set(i, j, v)\n return grid\n\n\nclass ExMiniGridEnv(MiniGridEnv):\n\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n\n # Used to observe the environment in the step() before the action\n observe = -1\n\n # Action space\n left = 0\n right = 1\n forward = 2\n toggle = 3\n\n # Extra action (not used)\n pickup = 4\n drop = 5\n done = 6\n clean = 7\n\n\n def print_grid(self, grid):\n\n for i, e in enumerate(grid.grid):\n if i % grid.height == 0:\n print(\"\")\n if e is not None:\n print(str(e.type), end=\"\\t\")\n else:\n print(\"none\", end=\"\\t\")\n print(\"\")\n\n def strings_to_actions(self, actions):\n for i, action_name in enumerate(actions):\n if action_name == \"left\":\n actions[i] = self.actions.left\n elif action_name == \"right\":\n actions[i] = self.actions.right\n elif action_name == \"forward\":\n actions[i] = self.actions.forward\n elif action_name == \"toggle\":\n actions[i] = self.actions.toggle\n elif action_name == \"done\":\n actions[i] = self.actions.done\n elif action_name == \"clean\":\n actions[i] = self.actions.clean\n elif action_name == \"observe\":\n actions[i] = self.actions.observe\n\n return actions\n\n def action_to_string(self, action):\n if action == self.actions.left:\n return \"left\"\n elif action == self.actions.right:\n return \"right\"\n elif action == self.actions.forward:\n return \"forward\"\n elif action == self.actions.toggle:\n return \"toggle\"\n elif action == self.actions.done:\n return \"done\"\n elif action == self.actions.clean:\n return \"clean\"\n elif action == self.actions.observe:\n return \"observe\"\n return None\n\n\n def __init__(self, grid_size=16, max_steps=-1, see_through_walls=False, seed=1337):\n # Grab configuration\n self.config = cg.Configuration.grab()\n # Overriding the max_num_steps\n max_num_steps = max_steps\n if hasattr(self.config, 'max_num_steps'):\n max_num_steps = self.config.max_num_steps\n super().__init__(grid_size, max_num_steps, see_through_walls, seed)\n self.actions = ExMiniGridEnv.Actions\n\n \"\"\"\n Observation Space\n low: lowest element value\n high: highest element value\n shape: imgSize tuple, each element can be of a value between 'low' and 'high'\n \"\"\"\n imgSize = reduce(operator.mul, OBS_ARRAY_SIZE, 1) + EXTRA_OBSERVATIONS_SIZE\n elemSize = len(IDX_TO_OBJECT)\n self.observation_space = spaces.Box(\n low=0,\n high=elemSize,\n shape=(imgSize,),\n dtype='uint8'\n )\n\n # 
Restricting action_space to the first N actions\n first_n_actions_available = 4\n self.action_space = spaces.Discrete(first_n_actions_available)\n\n\n\n\n def step(self, action):\n\n self.step_count += 1\n\n reward = 0\n done = False\n\n info = {\"event\": [], \"steps_count\": self.step_count}\n\n # Get the position in front of the agent\n fwd_pos = self.front_pos\n\n # Get the contents of the cell in front of the agent\n fwd_cell = self.grid.get(*fwd_pos)\n\n\n # Rotate left\n if action == self.actions.left:\n self.agent_dir -= 1\n if self.agent_dir < 0:\n self.agent_dir += 4\n\n # Rotate right\n elif action == self.actions.right:\n self.agent_dir = (self.agent_dir + 1) % 4\n\n # Move forward\n elif action == self.actions.forward:\n if fwd_cell == None or fwd_cell.can_overlap():\n self.agent_pos = fwd_pos\n # Step into Water\n if fwd_cell is not None and fwd_cell.type == 'water':\n done = True\n reward = self.config.rewards.standard.death\n info[\"event\"].append(\"died\")\n if self.config.envelope:\n print(\"DIED!! >>>>>>> Problems with envelope!\")\n # Step into Goal\n elif fwd_cell is not None and fwd_cell.type == 'goal':\n try:\n if self.goal_enabled():\n done = True\n reward = self.config.rewards.standard.goal\n # reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)\n info[\"event\"].append(\"goal\")\n except:\n done = True\n reward = self.config.rewards.standard.goal\n # reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)\n info[\"event\"].append(\"goal\")\n\n else:\n reward = self.config.rewards.actions.forward\n\n # Pick up an object\n elif action == self.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if self.carrying is None:\n self.carrying = fwd_cell\n self.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n\n # Drop an object\n elif action == self.actions.drop:\n if not fwd_cell and self.carrying:\n self.grid.set(*fwd_pos, self.carrying)\n self.carrying.cur_pos = fwd_pos\n self.carrying = None\n\n # Toggle/activate an object\n elif action == self.actions.toggle:\n if fwd_cell is not None and fwd_cell.type == 'dirt':\n reward = self.config.rewards.cleaningenv.clean\n if fwd_cell:\n fwd_cell.toggle(self, fwd_pos)\n\n # Done action (not used by default)\n elif action == self.actions.done:\n pass\n\n else:\n assert False, \"unknown action\"\n\n # Adding reward for the step\n reward += self.config.rewards.standard.step\n\n if self.step_count == self.config.max_num_steps_episode:\n done = True\n\n obs = self.gen_obs()\n\n if self.config.debug_mode: print(\"reward: \" + str(reward) + \"\\tinfo: \" + str(info))\n\n return obs, reward, done, info\n\n\n def goal_enabled(self):\n raise NotImplementedError()\n\n\n def gen_obs_decoded(self):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n grid, vis_mask = self.gen_obs_grid()\n\n if self.config.debug_mode:\n print(\"\\nAgent View Original\")\n self.print_grid(grid)\n\n \"\"\"if Perception.light_on_current_room(self):\"\"\"\n try:\n agent_pos = (AGENT_VIEW_SIZE // 2, AGENT_VIEW_SIZE - 1)\n\n obs_door_open = 0\n obs_light_on = 0\n current_room = 0\n current_room_light = 0\n next_room_light = 0\n\n if self.roomList:\n for x in self.roomList:\n # Save room number\n if x.objectInRoom(self.agent_pos):\n current_room = x.number\n current_room_light = x.getLight()\n else:\n next_room_light = x.getLight()\n\n # check if room is on the dark\n if not x.getLight():\n for j in range(0, grid.height):\n for i 
in range(0, grid.width):\n # pass the obs coordinates (i, j) into the absolute grid coordinates (xpos, ypos).\n xpos = agent_pos[1] - j\n ypos = i - agent_pos[0]\n (xpos, ypos) = self.get_grid_coords_from_view((xpos, ypos))\n\n # check if the object position is on the room\n if x.objectInRoom((xpos, ypos)):\n if grid.grid[(j * AGENT_VIEW_SIZE) + i] is not None:\n grid.grid[i + (j * AGENT_VIEW_SIZE)] = None\n\n for j in range(0, grid.height):\n for i in range(0, grid.width):\n\n v = grid.get(i, j)\n\n if hasattr(v, 'is_open') and v.is_open:\n obs_door_open = 1\n\n if hasattr(v, 'is_on') and v.is_on:\n obs_light_on = 1\n\n\n if self.config.debug_mode:\n print(\"\\n\\nobs_door_open\\t\\t\" + str(obs_door_open))\n print(\"obs_light_on\\t\\t\" + str(obs_light_on))\n print(\"current_room\\t\\t\" + str(current_room))\n print(\"current_room_light\\t\" + str(current_room_light*1))\n print(\"next_room_light\\t\\t\" + str(next_room_light*1) + \"\\n\\n\")\n\n\n return grid, (obs_door_open, obs_light_on, current_room, current_room_light*1, next_room_light*1)\n\n except AttributeError:\n traceback.print_exc()\n print(\"ERROR!!!\")\n\n\n\n def gen_obs(self):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n grid, extra_observations = self.gen_obs_decoded()\n\n if self.config.debug_mode:\n print(\"\\nAgent View Retreived\")\n self.print_grid(grid)\n\n \"\"\"if Perception.light_on_current_room(self):\"\"\"\n try:\n\n array = np.zeros(shape=(grid.width, grid.height, 1), dtype='uint8')\n\n obs_door_open = 0\n obs_light_on = 0\n\n for j in range(0, grid.height):\n for i in range(0, grid.width):\n\n v = grid.get(i, j)\n\n if v == None:\n continue\n\n array[i, j, 0] = OBJECT_TO_IDX[v.type]\n\n if hasattr(v, 'is_open') and v.is_open:\n obs_door_open = 1\n\n if hasattr(v, 'is_on') and v.is_on:\n obs_light_on = 1\n\n image = array\n\n flatten_image = image.flatten()\n\n obs = np.append(flatten_image, extra_observations)\n\n return obs\n\n except AttributeError:\n traceback.print_exc()\n print(\"ERROR!!!\")\n # return super().gen_obs()\n\n\n def get_grid_coords_from_view(self, coordinates):\n \"\"\"\n Dual of \"get_view_coords\". 
Translate and rotate relative to the agent coordinates (i, j) into the\n absolute grid coordinates.\n Need to have tuples of integers for the position of the agent and its direction\n :param coordinates: tuples of integers (vertical,horizontal) position from the agent relative to its position\n :return : coordinates translated into the absolute grid coordinates.\n \"\"\"\n ax, ay = self.agent_pos\n ad = self.agent_dir\n x, y = coordinates\n # agent facing down\n if ad == 1:\n ax -= y\n ay += x\n # agent facing right\n elif ad == 0:\n ax += x\n ay += y\n # agent facing left\n elif ad == 2:\n ax -= x\n ay -= y\n # agent facing up\n elif ad == 3:\n ax += y\n ay -= x\n return ax, ay\n\n def worldobj_in_agent(self, front, side):\n \"\"\"\n Returns the type of the worldobject in the 'front' cells in front and 'side' cells right (positive) or left (negative)\n with respect to the agent\n :param front: integer representing the number of cells in front of the agent\n :param side: integer, if positive represents the cells to the right, negative to the left of the agent\n :return: string: worldobj type\n \"\"\"\n\n coordinates = (front, side)\n wx, wy = ExMiniGridEnv.get_grid_coords_from_view(self, coordinates)\n\n if 0 <= wx < self.grid.width and 0 <= wy < self.grid.height:\n worldobj = self.grid.get(wx, wy)\n\n if worldobj is not None:\n worldobj_type = worldobj.type\n return worldobj_type\n return None\n"
] | [
[
"numpy.array",
"numpy.append",
"numpy.zeros"
]
] |
gohyun14/Game | [
"39e6e192590059daade40c95cc177acb0f3a581b"
] | [
"codenames/players/codemaster_glove_lookahead.py"
] | [
"import scipy.spatial.distance\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom math import ceil\nimport numpy as np\nimport copy\nimport itertools\n\nfrom players.codemaster import Codemaster\nTHRESHOLD = np.inf\n\nclass AICodemaster(Codemaster):\n\n def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):\n super().__init__()\n self.brown_ic = brown_ic\n self.glove_vecs = glove_vecs\n self.word_vectors = word_vectors\n self.wordnet_lemmatizer = WordNetLemmatizer()\n self.lancaster_stemmer = LancasterStemmer()\n self.cm_wordlist = []\n with open('players/cm_wordlist.txt') as infile:\n for line in infile:\n self.cm_wordlist.append(line.rstrip())\n self.root = None\n self.turn_number = 0\n\n def set_game_state(self, words, maps):\n if self.turn_number == 0:\n self.original_words = copy.copy(words)\n print(f\"original words: {self.original_words}\")\n self.words = words\n self.maps = maps\n self.update_board()\n self.init_dists()\n self.turn_number += 1\n\n def update_board(self):\n self.red_words = set()\n self.bad_words = set()\n self.words_guessed = set()\n for i in range(25):\n if self.words[i][0] == '*':\n self.words_guessed.add(self.original_words[i].lower())\n elif self.maps[i] == \"Assassin\" or self.maps[i] == \"Blue\" or self.maps[i] == \"Civilian\":\n self.bad_words.add(self.words[i].lower())\n if self.maps[i] == \"Assassin\":\n self.black_word = self.words[i]\n else:\n self.red_words.add(self.words[i].lower())\n \n def init_dists(self):\n cos_dist = scipy.spatial.distance.cosine\n all_vectors = (self.glove_vecs,)\n self.bad_word_dists = {}\n for word in self.bad_words:\n self.bad_word_dists[word] = {}\n for val in self.cm_wordlist:\n b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))\n self.bad_word_dists[word][val] = b_dist\n\n self.red_word_dists = {}\n for word in self.red_words:\n self.red_word_dists[word] = {}\n for val in self.cm_wordlist:\n b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))\n self.red_word_dists[word][val] = b_dist\n\n def get_clue(self):\n #self.all_guesses = set()\n if self.root is None or self.root.words_guessed != self.words_guessed:\n if self.root:\n print(\"board mismatch: initializing new root\")\n print(f\"game's words guessed: {self.words_guessed} nodes' words guessed: {self.root.words_guessed}\")\n self.root = Node(self, copy.copy(self.words_guessed), None, depth = self.turn_number-1)\n self.root.get_val()\n best_clue = self.root.best_clue\n print('chosen_clue is:', best_clue[0])\n\n self.root = self.root.best_child\n return best_clue\n\n def arr_not_in_word(self, word, arr):\n if word in arr:\n return False\n lemm = self.wordnet_lemmatizer.lemmatize(word)\n lancas = self.lancaster_stemmer.stem(word)\n for i in arr:\n if i == lemm or i == lancas:\n return False\n if i.find(word) != -1:\n return False\n if word.find(i) != -1:\n return False\n return True\n\n def combine(self, words, wordvecs):\n factor = 1.0 / float(len(words))\n new_word = self.concatenate(words[0], wordvecs) * factor\n for word in words[1:]:\n new_word += self.concatenate(word, wordvecs) * factor\n return new_word\n\n def concatenate(self, word, wordvecs):\n concatenated = wordvecs[0][word]\n for vec in wordvecs[1:]:\n concatenated = np.hstack((concatenated, vec[word]))\n return concatenated\n\n\nclass Node:\n def __init__(self, codemaster, words_guessed, parent, depth = 0, best=np.inf):\n self.codemaster = codemaster\n self.words_guessed 
= words_guessed\n self.parent = parent\n self.depth = depth\n self.best_clue = None\n self.best_child = None\n self.val = np.inf\n self.terminal = False\n self.best = best\n\n def get_best_clues(self):\n bests = {}\n possible = {}\n cm = self.codemaster\n red_words = cm.red_words.difference(self.words_guessed)\n bad_words = cm.bad_words.difference(self.words_guessed)\n print(f\"calculating best clues\")\n #print(f\"red word dists: {self.red_word_dists}\")\n for clue_num in range(1, 3 + 1):\n best_per_dist = np.inf\n best_per = ''\n best_red_word = ''\n for red_word in list(itertools.combinations(red_words, clue_num)):\n best_word = ''\n best_dist = np.inf\n for word in cm.cm_wordlist:\n if not cm.arr_not_in_word(word, red_words.union(bad_words)):\n continue\n\n bad_dist = np.inf\n worst_bad = ''\n for bad_word in bad_words:\n if cm.bad_word_dists[bad_word][word] < bad_dist:\n bad_dist = cm.bad_word_dists[bad_word][word]\n worst_bad = bad_word\n worst_red = 0\n for red in red_word:\n dist = cm.red_word_dists[red][word]\n if dist > worst_red:\n worst_red = dist\n\n if worst_red < best_dist and worst_red < bad_dist:\n best_dist = worst_red\n best_word = word\n # print(worst_red,red_word,word)\n\n if best_dist < best_per_dist:\n best_per_dist = best_dist\n best_per = best_word\n best_red_word = red_word\n if best_dist < THRESHOLD or clue_num == 1: \n possible[(best_word, clue_num)] = (red_word, best_dist)\n bests[clue_num] = (best_red_word, best_per, best_per_dist)\n print(f\"length of possibilities: {len(possible)}\")\n return possible\n\n def add_children(self):\n cos_dist = scipy.spatial.distance.cosine\n cm = self.codemaster\n all_vectors = (cm.glove_vecs,)\n print(f\"at depth {self.depth}\")\n bests = self.get_best_clues()\n for clue, clue_info in bests.items():\n combined_clue, clue_num = clue\n best_red_word, combined_score = clue_info\n worst = -np.inf\n for word in best_red_word:\n dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))\n if dist > worst:\n worst = dist\n if worst < 0.7 and worst != -np.inf or clue_num == 1:\n print(f\"adding clue: {clue}\")\n self.add_child(clue, best_red_word)\n \n def check_board(self):\n cm = self.codemaster\n self.black_guessed = cm.black_word in self.words_guessed\n red_words = cm.red_words.difference(self.words_guessed)\n\n red_count = len(red_words)\n if self.black_guessed:\n self.val = np.inf\n self.terminal = True\n elif red_count == 0:\n self.val = self.depth\n self.terminal = True\n print(f\"Terminal Node: depth: {self.depth}\")\n else:\n self.val = 25\n \n def new_child(self, expected_words_chosen):\n new_words_guessed = copy.copy(self.words_guessed)\n for word in expected_words_chosen:\n new_words_guessed.add(word)\n return Node(self.codemaster, new_words_guessed, self, self.depth + 1, self.best)\n \n def get_val(self, depth=np.inf):\n # if self.words_guessed in self.codemaster.all_guesses:\n # print(\"Board State already explored\")\n # return self.val\n # self.codemaster.all_guesses.add(self.words_guessed)\n self.check_board()\n if self.not_possible():\n print(\"Skipped\")\n return self.val\n if self.terminal:\n if self.val < self.best:\n self.best = self.val\n return self.val\n if self.best_clue is not None:\n return self.val\n best_val = np.inf\n possible = self.get_best_clues()\n for clue, clue_info in sorted(possible.items(), key = lambda x: (x[0][1],-x[1][1]), reverse=True):\n combined_clue, clue_num = clue\n best_red_word, combined_score = clue_info\n if self.check_clue_feasible(clue_num, 
combined_score):\n print(f\"Exploring child, depth: {self.depth+1}, clue: {clue}, dist: {combined_score}\")\n child = self.new_child(best_red_word)\n child_val = child.get_val(depth)\n if child_val < best_val:\n best_val = child_val\n self.best_clue = clue\n self.best_child = child\n if child.best < self.best:\n print(f\"Found new best, prev: {self.best} new: {child.best}\")\n self.best = child.best\n self.val = best_val\n return self.val\n\n # def best_child(self):\n # best_clue = self.best_clue\n # for child_key in self.children.keys():\n # if child_key == best_clue:\n # best_child = self.children[child_key]\n # best_child.reset_depth()\n # return best_child\n\n def not_possible(self):\n red_words = self.codemaster.red_words.difference(self.words_guessed)\n best_possible = self.depth + ceil(len(red_words)/3)\n print(f\"BEST POSSIBLE: {best_possible}\")\n return self.best <= best_possible or self.depth >= self.best or (not self.terminal and self.depth == self.best - 1)\n\n def check_clue_feasible(self, clue_num, combined_score):\n return clue_num == 1 or combined_score < THRESHOLD\n # cos_dist = scipy.spatial.distance.cosine\n # cm = self.codemaster\n # all_vectors = (cm.glove_vecs,)\n # worst = -np.inf\n # for word in best_red_word:\n # dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))\n # if dist > worst:\n # worst = dist\n # return worst < 0.7 and worst != -np.inf or clue_num == 1\n\n\n \n"
] | [
[
"numpy.hstack"
]
] |
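
The record above embeds a Codenames codemaster that runs a branch-and-bound search over sequences of clues, scoring each candidate clue by cosine distance between GloVe vectors: a clue is kept when its distance to the *farthest* targeted red word is still smaller than its distance to the *nearest* bad word. A minimal, self-contained sketch of that scoring rule follows; the words, vectors, and variable names are illustrative assumptions, not data from the record.

```python
# Sketch of the clue-scoring rule used by the codemaster above (assumptions:
# toy 3-d "embeddings" and word lists; the real code uses GloVe vectors and
# players/cm_wordlist.txt).
import numpy as np
from scipy.spatial.distance import cosine

glove_vecs = {                       # hypothetical embedding table
    "ocean":  np.array([0.9, 0.1, 0.0]),
    "wave":   np.array([0.8, 0.2, 0.1]),
    "boat":   np.array([0.7, 0.1, 0.2]),
    "desert": np.array([0.0, 0.9, 0.1]),
}
red_words  = ["wave", "boat"]        # words the codemaster wants guessed
bad_words  = ["desert"]              # assassin/blue/civilian words
candidates = ["ocean"]               # stand-in for cm_wordlist

best_clue, best_score = None, np.inf
for clue in candidates:
    # worst (largest) distance to any targeted red word ...
    worst_red = max(cosine(glove_vecs[clue], glove_vecs[w]) for w in red_words)
    # ... must still beat the closest bad word for the clue to be safe.
    nearest_bad = min(cosine(glove_vecs[clue], glove_vecs[w]) for w in bad_words)
    if worst_red < nearest_bad and worst_red < best_score:
        best_clue, best_score = clue, worst_red

print(best_clue, best_score)         # -> 'ocean' with a small distance
```

The tree search in the record then expands one node per surviving `(clue, clue_num)` pair and prunes with a lower bound of `depth + ceil(remaining_red / 3)` turns, which is why a safe multi-word clue is explored before single-word fallbacks.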
ATTPC/VAE-event-classification | [
"aae331d44bffffec2ca8a6cdef71208899db0052"
] | [
"src/event_representations.py"
] | [
"import numpy as np\n\ndef make_histograms(x, bins=40, interval=[1e-1, 1]):\n intervals = np.linspace(interval[0], interval[1], bins)\n flat_x = x.reshape((x.shape[0], -1))\n hist_x = np.zeros((x.shape[0], bins))\n for i in range(1, bins):\n mask = flat_x <= intervals[i]\n mask = np.logical_and(mask, flat_x > intervals[i-1])\n hist_x[:, i] = mask.sum(1)\n\n return hist_x\n\ndef make_net_count(x, **kwargs):\n flat_x = x.reshape((x.shape[0], -1))\n sum_x = flat_x.sum(1)\n return sum_x\n"
] | [
[
"numpy.logical_and",
"numpy.linspace",
"numpy.zeros"
]
] |
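
The `make_histograms` helper in the record above turns each event image into a fixed-length intensity histogram, and `make_net_count` reduces it to a single total. A short usage sketch under stated assumptions (synthetic `events`; a tuple default in place of the record's mutable-list default; note that bin 0 is never filled because the loop starts at 1):

```python
# Usage sketch for the histogram featurizer above; `events` is synthetic data,
# not part of the dataset record.
import numpy as np

def make_histograms(x, bins=40, interval=(1e-1, 1)):
    intervals = np.linspace(interval[0], interval[1], bins)  # bin edges
    flat_x = x.reshape((x.shape[0], -1))                     # one row per event
    hist_x = np.zeros((x.shape[0], bins))
    for i in range(1, bins):
        # count pixels whose intensity falls in (edge[i-1], edge[i]]
        mask = np.logical_and(flat_x <= intervals[i], flat_x > intervals[i - 1])
        hist_x[:, i] = mask.sum(1)
    return hist_x

events = np.random.rand(8, 32, 32)    # 8 hypothetical 32x32 event images
features = make_histograms(events, bins=40)
print(features.shape)                 # (8, 40); column 0 stays zero by design
```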
handongke/tensorflow | [
"c6bb5cd0447a0af2764c195fb14d218df8ae6471"
] | [
"tensorflow/python/ops/nn_ops.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Wrappers for primitive Neural Net (NN) Operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\n\nimport numpy as np\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_nn_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.deprecation import deprecated_args\nfrom tensorflow.python.util.deprecation import deprecated_argument_lookup\n\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Aliases for some automatically-generated names.\nlocal_response_normalization = gen_nn_ops.lrn\n\n# pylint: disable=protected-access\n\n\ndef _non_atrous_convolution(\n input, # pylint: disable=redefined-builtin\n filter, # pylint: disable=redefined-builtin\n padding,\n data_format=None, # pylint: disable=redefined-builtin\n strides=None,\n name=None):\n \"\"\"Computes sums of N-D convolutions (actually cross correlation).\n\n It is required that 1 <= N <= 3.\n\n This is used to implement the more generic `convolution` function, which\n extends the interface of this function with a `dilation_rate` parameter.\n\n Args:\n\n input: Rank N+2 tensor of type T of shape\n `[batch_size] + input_spatial_shape + [in_channels]` if `data_format`\n does not start with `\"NC\"`, or\n `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts\n with `\"NC\"`.\n filter: Rank N+2 tensor of type T of shape\n `filter_spatial_shape + [in_channels, out_channels]`. Rank of either\n `input` or `filter` must be known.\n padding: Padding method to use, must be either \"VALID\" or \"SAME\".\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". 
For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n strides: Sequence of N positive integers, defaults to `[1] * N`.\n name: Name prefix to use.\n\n Returns:\n Rank N+2 tensor of type T of shape\n `[batch_size] + output_spatial_shape + [out_channels]`, where\n if padding == \"SAME\":\n output_spatial_shape = input_spatial_shape\n if padding == \"VALID\":\n output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.\n\n Raises:\n ValueError: if ranks are incompatible.\n\n \"\"\"\n with ops.name_scope(name, \"non_atrous_convolution\", [input, filter]) as scope:\n input = ops.convert_to_tensor(input, name=\"input\") # pylint: disable=redefined-builtin\n input_shape = input.get_shape()\n filter = ops.convert_to_tensor(filter, name=\"filter\") # pylint: disable=redefined-builtin\n filter_shape = filter.get_shape()\n op = _NonAtrousConvolution(\n input_shape,\n filter_shape=filter_shape,\n padding=padding,\n data_format=data_format,\n strides=strides,\n name=scope)\n return op(input, filter)\n\n\nclass _NonAtrousConvolution(object):\n \"\"\"Helper class for _non_atrous_convolution.\n\n Note that this class assumes that shapes of input and filter passed to\n __call__ are compatible with input_shape and filter_shape passed to the\n constructor.\n\n Arguments:\n input_shape: static input shape, i.e. input.get_shape().\n filter_shape: static filter shape, i.e. filter.get_shape().\n padding: see _non_atrous_convolution.\n data_format: see _non_atrous_convolution.\n strides: see _non_atrous_convolution.\n name: see _non_atrous_convolution.\n \"\"\"\n\n def __init__(\n self,\n input_shape,\n filter_shape, # pylint: disable=redefined-builtin\n padding,\n data_format=None,\n strides=None,\n name=None):\n filter_shape = filter_shape.with_rank(input_shape.ndims)\n self.padding = padding\n self.name = name\n input_shape = input_shape.with_rank(filter_shape.ndims)\n if input_shape.ndims is None:\n raise ValueError(\"Rank of convolution must be known\")\n if input_shape.ndims < 3 or input_shape.ndims > 5:\n raise ValueError(\n \"`input` and `filter` must have rank at least 3 and at most 5\")\n conv_dims = input_shape.ndims - 2\n if strides is None:\n strides = [1] * conv_dims\n elif len(strides) != conv_dims:\n raise ValueError(\"len(strides)=%d, but should be %d\" % (len(strides),\n conv_dims))\n if conv_dims == 1:\n # conv1d uses the 2-d data format names\n if data_format is None:\n data_format = \"NWC\"\n elif data_format not in {\"NCW\", \"NWC\", \"NCHW\", \"NHWC\"}:\n raise ValueError(\"data_format must be \\\"NWC\\\" or \\\"NCW\\\".\")\n self.strides = strides[0]\n self.data_format = data_format\n self.conv_op = self._conv1d\n elif conv_dims == 2:\n if data_format is None or data_format == \"NHWC\":\n data_format = \"NHWC\"\n strides = [1] + list(strides) + [1]\n elif data_format == \"NCHW\":\n strides = [1, 1] + list(strides)\n else:\n raise ValueError(\"data_format must be \\\"NHWC\\\" or \\\"NCHW\\\".\")\n self.strides = strides\n self.data_format = data_format\n self.conv_op = conv2d\n elif conv_dims == 3:\n if data_format is None or data_format == \"NDHWC\":\n strides = [1] + list(strides) + [1]\n elif data_format == \"NCDHW\":\n strides = [1, 1] + list(strides)\n else:\n raise ValueError(\"data_format must be \\\"NDHWC\\\" or \\\"NCDHW\\\". 
Have: %s\"\n % data_format)\n self.strides = strides\n self.data_format = data_format\n self.conv_op = gen_nn_ops.conv3d\n\n # Note that we need this adapter since argument names for conv1d don't match\n # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.\n # pylint: disable=redefined-builtin\n def _conv1d(self, input, filter, strides, padding, data_format, name):\n return conv1d(\n value=input,\n filters=filter,\n stride=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n # pylint: enable=redefined-builtin\n\n def __call__(self, inp, filter): # pylint: disable=redefined-builtin\n return self.conv_op(\n input=inp,\n filter=filter,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n name=self.name)\n\n\n@tf_export(\"nn.dilation2d\", v1=[])\ndef dilation2d_v2(\n input, # pylint: disable=redefined-builtin\n filters, # pylint: disable=redefined-builtin\n strides,\n padding,\n data_format,\n dilations,\n name=None):\n \"\"\"Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.\n\n The `input` tensor has shape `[batch, in_height, in_width, depth]` and the\n `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each\n input channel is processed independently of the others with its own\n structuring function. The `output` tensor has shape\n `[batch, out_height, out_width, depth]`. The spatial dimensions of the output\n tensor depend on the `padding` algorithm. We currently only support the\n default \"NHWC\" `data_format`.\n\n In detail, the grayscale morphological 2-D dilation is the max-sum correlation\n (for consistency with `conv2d`, we use unmirrored filters):\n\n output[b, y, x, c] =\n max_{dy, dx} input[b,\n strides[1] * y + rates[1] * dy,\n strides[2] * x + rates[2] * dx,\n c] +\n filters[dy, dx, c]\n\n Max-pooling is a special case when the filter has size equal to the pooling\n kernel size and contains all zeros.\n\n Note on duality: The dilation of `input` by the `filters` is equal to the\n negation of the erosion of `-input` by the reflected `filters`.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,\n `uint32`, `uint64`.\n 4-D with shape `[batch, in_height, in_width, depth]`.\n filters: A `Tensor`. Must have the same type as `input`.\n 3-D with shape `[filter_height, filter_width, depth]`.\n strides: A list of `ints` that has length `>= 4`.\n The stride of the sliding window for each dimension of the input\n tensor. Must be: `[1, stride_height, stride_width, 1]`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n data_format: A `string`, only `\"NCHW\"` is currently supported.\n dilations: A list of `ints` that has length `>= 4`.\n The input stride for atrous morphological dilation. Must be:\n `[1, rate_height, rate_width, 1]`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n \"\"\"\n if data_format != \"NCHW\":\n raise ValueError(\"Data formats other than NCHW are not yet supported\")\n\n return gen_nn_ops.dilation2d(input=input,\n filter=filters,\n strides=strides,\n rates=dilations,\n padding=padding,\n name=name)\n\n\n@tf_export(\"nn.with_space_to_batch\")\ndef with_space_to_batch(\n input, # pylint: disable=redefined-builtin\n dilation_rate,\n padding,\n op,\n filter_shape=None,\n spatial_dims=None,\n data_format=None):\n \"\"\"Performs `op` on the space-to-batch representation of `input`.\n\n This has the effect of transforming sliding window operations into the\n corresponding \"atrous\" operation in which the input is sampled at the\n specified `dilation_rate`.\n\n In the special case that `dilation_rate` is uniformly 1, this simply returns:\n\n op(input, num_spatial_dims, padding)\n\n Otherwise, it returns:\n\n batch_to_space_nd(\n op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),\n num_spatial_dims,\n \"VALID\")\n adjusted_dilation_rate,\n adjusted_crops),\n\n where:\n\n adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],\n adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]\n\n defined as follows:\n\n We first define two int64 tensors `paddings` and `crops` of shape\n `[num_spatial_dims, 2]` based on the value of `padding` and the spatial\n dimensions of the `input`:\n\n If `padding = \"VALID\"`, then:\n\n paddings, crops = required_space_to_batch_paddings(\n input_shape[spatial_dims],\n dilation_rate)\n\n If `padding = \"SAME\"`, then:\n\n dilated_filter_shape =\n filter_shape + (filter_shape - 1) * (dilation_rate - 1)\n\n paddings, crops = required_space_to_batch_paddings(\n input_shape[spatial_dims],\n dilation_rate,\n [(dilated_filter_shape - 1) // 2,\n dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])\n\n Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial\n dimensions are contiguous starting at the second dimension, but the specified\n `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and\n `crops` in order to be usable with these operations. For a given dimension,\n if the block size is 1, and both the starting and ending padding and crop\n amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,\n which is what is needed for dimensions not part of `spatial_dims`.\n Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case\n efficiently for any number of leading and trailing dimensions.\n\n For 0 <= i < len(spatial_dims), we assign:\n\n adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]\n adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]\n adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]\n\n All unassigned values of `adjusted_dilation_rate` default to 1, while all\n unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.\n\n Note in the case that `dilation_rate` is not uniformly 1, specifying \"VALID\"\n padding is equivalent to specifying `padding = \"SAME\"` with a filter_shape of\n `[1]*N`.\n\n Advanced usage. 
Note the following optimization: A sequence of\n `with_space_to_batch` operations with identical (not uniformly 1)\n `dilation_rate` parameters and \"VALID\" padding\n\n net = with_space_to_batch(net, dilation_rate, \"VALID\", op_1)\n ...\n net = with_space_to_batch(net, dilation_rate, \"VALID\", op_k)\n\n can be combined into a single `with_space_to_batch` operation as follows:\n\n def combined_op(converted_input, num_spatial_dims, _):\n result = op_1(converted_input, num_spatial_dims, \"VALID\")\n ...\n result = op_k(result, num_spatial_dims, \"VALID\")\n\n net = with_space_to_batch(net, dilation_rate, \"VALID\", combined_op)\n\n This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and\n `batch_to_space_nd`.\n\n Similarly, a sequence of `with_space_to_batch` operations with identical (not\n uniformly 1) `dilation_rate` parameters, \"SAME\" padding, and odd filter\n dimensions\n\n net = with_space_to_batch(net, dilation_rate, \"SAME\", op_1, filter_shape_1)\n ...\n net = with_space_to_batch(net, dilation_rate, \"SAME\", op_k, filter_shape_k)\n\n can be combined into a single `with_space_to_batch` operation as follows:\n\n def combined_op(converted_input, num_spatial_dims, _):\n result = op_1(converted_input, num_spatial_dims, \"SAME\")\n ...\n result = op_k(result, num_spatial_dims, \"SAME\")\n\n net = with_space_to_batch(net, dilation_rate, \"VALID\", combined_op)\n\n Args:\n input: Tensor of rank > max(spatial_dims).\n dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].\n padding: str constant equal to \"VALID\" or \"SAME\"\n op: Function that maps (input, num_spatial_dims, padding) -> output\n filter_shape: If padding = \"SAME\", specifies the shape of the convolution\n kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].\n If padding = \"VALID\", filter_shape is ignored and need not be specified.\n spatial_dims: Monotonically increasing sequence of `num_spatial_dims`\n integers (which are >= 1) specifying the spatial dimensions of `input`\n and output. Defaults to: `range(1, num_spatial_dims+1)`.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n\n Returns:\n The output Tensor as described above, dimensions will vary based on the op\n provided.\n\n Raises:\n ValueError: if `padding` is invalid or the arguments are incompatible.\n ValueError: if `spatial_dims` are invalid.\n\n \"\"\"\n input = ops.convert_to_tensor(input, name=\"input\") # pylint: disable=redefined-builtin\n input_shape = input.get_shape()\n\n def build_op(num_spatial_dims, padding):\n return lambda inp, _: op(inp, num_spatial_dims, padding)\n\n new_op = _WithSpaceToBatch(\n input_shape,\n dilation_rate,\n padding,\n build_op,\n filter_shape=filter_shape,\n spatial_dims=spatial_dims,\n data_format=data_format)\n return new_op(input, None)\n\n\nclass _WithSpaceToBatch(object):\n \"\"\"Helper class for with_space_to_batch.\n\n Note that this class assumes that shapes of input and filter passed to\n __call__ are compatible with input_shape and filter_shape passed to the\n constructor.\n\n Arguments\n input_shape: static shape of input. i.e. 
input.get_shape().\n dilation_rate: see with_space_to_batch\n padding: see with_space_to_batch\n build_op: Function that maps (num_spatial_dims, paddings) -> (function that\n maps (input, filter) -> output).\n filter_shape: see with_space_to_batch\n spatial_dims: see with_space_to_batch\n data_format: see with_space_to_batch\n \"\"\"\n\n def __init__(self,\n input_shape,\n dilation_rate,\n padding,\n build_op,\n filter_shape=None,\n spatial_dims=None,\n data_format=None):\n \"\"\"Helper class for _with_space_to_batch.\"\"\"\n dilation_rate = ops.convert_to_tensor(\n dilation_rate, dtypes.int32, name=\"dilation_rate\")\n try:\n rate_shape = dilation_rate.get_shape().with_rank(1)\n except ValueError:\n raise ValueError(\"rate must be rank 1\")\n\n if not dilation_rate.get_shape().is_fully_defined():\n raise ValueError(\"rate must have known shape\")\n\n num_spatial_dims = rate_shape.dims[0].value\n\n if data_format is not None and data_format.startswith(\"NC\"):\n starting_spatial_dim = 2\n else:\n starting_spatial_dim = 1\n\n if spatial_dims is None:\n spatial_dims = range(starting_spatial_dim,\n num_spatial_dims + starting_spatial_dim)\n orig_spatial_dims = list(spatial_dims)\n spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))\n if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):\n raise ValueError(\n \"spatial_dims must be a montonically increasing sequence of positive \"\n \"integers\") # pylint: disable=line-too-long\n\n if data_format is not None and data_format.startswith(\"NC\"):\n expected_input_rank = spatial_dims[-1]\n else:\n expected_input_rank = spatial_dims[-1] + 1\n\n try:\n input_shape.with_rank_at_least(expected_input_rank)\n except ValueError:\n raise ValueError(\n \"input tensor must have rank %d at least\" % (expected_input_rank))\n\n const_rate = tensor_util.constant_value(dilation_rate)\n rate_or_const_rate = dilation_rate\n if const_rate is not None:\n rate_or_const_rate = const_rate\n if np.any(const_rate < 1):\n raise ValueError(\"dilation_rate must be positive\")\n if np.all(const_rate == 1):\n self.call = build_op(num_spatial_dims, padding)\n return\n\n # We have two padding contributions. The first is used for converting \"SAME\"\n # to \"VALID\". 
The second is required so that the height and width of the\n # zero-padded value tensor are multiples of rate.\n\n # Padding required to reduce to \"VALID\" convolution\n if padding == \"SAME\":\n if filter_shape is None:\n raise ValueError(\"filter_shape must be specified for SAME padding\")\n filter_shape = ops.convert_to_tensor(filter_shape, name=\"filter_shape\")\n const_filter_shape = tensor_util.constant_value(filter_shape)\n if const_filter_shape is not None:\n filter_shape = const_filter_shape\n self.base_paddings = _with_space_to_batch_base_paddings(\n const_filter_shape, num_spatial_dims, rate_or_const_rate)\n else:\n self.num_spatial_dims = num_spatial_dims\n self.rate_or_const_rate = rate_or_const_rate\n self.base_paddings = None\n elif padding == \"VALID\":\n self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)\n else:\n raise ValueError(\"Invalid padding method %r\" % padding)\n\n self.input_shape = input_shape\n self.spatial_dims = spatial_dims\n self.dilation_rate = dilation_rate\n self.data_format = data_format\n self.op = build_op(num_spatial_dims, \"VALID\")\n self.call = self._with_space_to_batch_call\n\n def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin\n \"\"\"Call functionality for with_space_to_batch.\"\"\"\n # Handle input whose shape is unknown during graph creation.\n input_spatial_shape = None\n input_shape = self.input_shape\n spatial_dims = self.spatial_dims\n if input_shape.ndims is not None:\n input_shape_list = input_shape.as_list()\n input_spatial_shape = [input_shape_list[i] for i in spatial_dims]\n if input_spatial_shape is None or None in input_spatial_shape:\n input_shape_tensor = array_ops.shape(inp)\n input_spatial_shape = array_ops.stack(\n [input_shape_tensor[i] for i in spatial_dims])\n\n base_paddings = self.base_paddings\n if base_paddings is None:\n # base_paddings could not be computed at build time since static filter\n # shape was not fully defined.\n filter_shape = array_ops.shape(filter)\n base_paddings = _with_space_to_batch_base_paddings(\n filter_shape, self.num_spatial_dims, self.rate_or_const_rate)\n paddings, crops = array_ops.required_space_to_batch_paddings(\n input_shape=input_spatial_shape,\n base_paddings=base_paddings,\n block_shape=self.dilation_rate)\n\n dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,\n spatial_dims)\n paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)\n crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)\n input_converted = array_ops.space_to_batch_nd(\n input=inp, block_shape=dilation_rate, paddings=paddings)\n\n result = self.op(input_converted, filter)\n\n result_converted = array_ops.batch_to_space_nd(\n input=result, block_shape=dilation_rate, crops=crops)\n\n # Recover channel information for output shape if channels are not last.\n if self.data_format is not None and self.data_format.startswith(\"NC\"):\n if not result_converted.shape.dims[1].value and filter is not None:\n output_shape = result_converted.shape.as_list()\n output_shape[1] = filter.shape[-1]\n result_converted.set_shape(output_shape)\n\n return result_converted\n\n def __call__(self, inp, filter): # pylint: disable=redefined-builtin\n return self.call(inp, filter)\n\n\ndef _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,\n rate_or_const_rate):\n \"\"\"Helper function to compute base_paddings.\"\"\"\n # Spatial dimensions of the filters and the upsampled filters in which we\n # introduce (rate - 1) zeros between consecutive filter 
values.\n filter_spatial_shape = filter_shape[:num_spatial_dims]\n dilated_filter_spatial_shape = (\n filter_spatial_shape + (filter_spatial_shape - 1) *\n (rate_or_const_rate - 1))\n pad_extra_shape = dilated_filter_spatial_shape - 1\n\n # When full_padding_shape is odd, we pad more at end, following the same\n # convention as conv2d.\n pad_extra_start = pad_extra_shape // 2\n pad_extra_end = pad_extra_shape - pad_extra_start\n base_paddings = array_ops.stack(\n [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])\n return base_paddings\n\n\ndef _with_space_to_batch_adjust(orig, fill_value, spatial_dims):\n \"\"\"Returns an `adjusted` version of `orig` based on `spatial_dims`.\n\n Tensor of the same type as `orig` and with shape\n `[max(spatial_dims), ...]` where:\n\n adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]\n\n for 0 <= i < len(spatial_dims), and\n\n adjusted[j, ...] = fill_value\n\n for j != spatial_dims[i] - 1 for some i.\n\n If `orig` is a constant value, then the result will be a constant value.\n\n Args:\n orig: Tensor of rank > max(spatial_dims).\n fill_value: Numpy scalar (of same data type as `orig) specifying the fill\n value for non-spatial dimensions.\n spatial_dims: See with_space_to_batch.\n\n Returns:\n `adjusted` tensor.\n \"\"\"\n fill_dims = orig.get_shape().as_list()[1:]\n dtype = orig.dtype.as_numpy_dtype\n parts = []\n const_orig = tensor_util.constant_value(orig)\n const_or_orig = const_orig if const_orig is not None else orig\n prev_spatial_dim = 0\n i = 0\n while i < len(spatial_dims):\n start_i = i\n start_spatial_dim = spatial_dims[i]\n if start_spatial_dim > 1:\n # Fill in any gap from the previous spatial dimension (or dimension 1 if\n # this is the first spatial dimension) with `fill_value`.\n parts.append(\n np.full(\n [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,\n fill_value,\n dtype=dtype))\n # Find the largest value of i such that:\n # [spatial_dims[start_i], ..., spatial_dims[i]]\n # == [start_spatial_dim, ..., start_spatial_dim + i - start_i],\n # i.e. the end of a contiguous group of spatial dimensions.\n while (i + 1 < len(spatial_dims) and\n spatial_dims[i + 1] == spatial_dims[i] + 1):\n i += 1\n parts.append(const_or_orig[start_i:i + 1])\n prev_spatial_dim = spatial_dims[i]\n i += 1\n if const_orig is not None:\n return np.concatenate(parts)\n else:\n return array_ops.concat(parts, 0)\n\n\ndef _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):\n \"\"\"Helper function for verifying strides and dilation_rate arguments.\n\n This is used by `convolution` and `pool`.\n\n Args:\n num_spatial_dims: int\n strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value\n of strides is > 1, then all values of dilation_rate must be 1.\n dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. 
If any\n value of dilation_rate is > 1, then all values of strides must be 1.\n\n Returns:\n Normalized (strides, dilation_rate) as int32 numpy arrays of shape\n [num_spatial_dims].\n\n Raises:\n ValueError: if the parameters are invalid.\n \"\"\"\n if dilation_rate is None:\n dilation_rate = [1] * num_spatial_dims\n elif len(dilation_rate) != num_spatial_dims:\n raise ValueError(\"len(dilation_rate)=%d but should be %d\" %\n (len(dilation_rate), num_spatial_dims))\n dilation_rate = np.array(dilation_rate, dtype=np.int32)\n if np.any(dilation_rate < 1):\n raise ValueError(\"all values of dilation_rate must be positive\")\n\n if strides is None:\n strides = [1] * num_spatial_dims\n elif len(strides) != num_spatial_dims:\n raise ValueError(\"len(strides)=%d but should be %d\" % (len(strides),\n num_spatial_dims))\n strides = np.array(strides, dtype=np.int32)\n if np.any(strides < 1):\n raise ValueError(\"all values of strides must be positive\")\n\n if np.any(strides > 1) and np.any(dilation_rate > 1):\n raise ValueError(\n \"strides > 1 not supported in conjunction with dilation_rate > 1\")\n return strides, dilation_rate\n\n\n@tf_export(v1=[\"nn.convolution\"])\ndef convolution(\n input, # pylint: disable=redefined-builtin\n filter, # pylint: disable=redefined-builtin\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None):\n # pylint: disable=line-too-long\n \"\"\"Computes sums of N-D convolutions (actually cross-correlation).\n\n This also supports either output striding via the optional `strides` parameter\n or atrous convolution (also known as convolution with holes or dilated\n convolution, based on the French word \"trous\" meaning holes in English) via\n the optional `dilation_rate` parameter. Currently, however, output striding\n is not supported for atrous convolutions.\n\n Specifically, in the case that `data_format` does not start with \"NC\", given\n a rank (N+2) `input` Tensor of shape\n\n [num_batches,\n input_spatial_shape[0],\n ...,\n input_spatial_shape[N-1],\n num_input_channels],\n\n a rank (N+2) `filter` Tensor of shape\n\n [spatial_filter_shape[0],\n ...,\n spatial_filter_shape[N-1],\n num_input_channels,\n num_output_channels],\n\n an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)\n specifying the filter upsampling/input downsampling rate, and an optional list\n of N `strides` (defaulting [1]*N), this computes for each N-D spatial output\n position (x[0], ..., x[N-1]):\n\n ```\n output[b, x[0], ..., x[N-1], k] =\n sum_{z[0], ..., z[N-1], q}\n filter[z[0], ..., z[N-1], q, k] *\n padded_input[b,\n x[0]*strides[0] + dilation_rate[0]*z[0],\n ...,\n x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],\n q]\n ```\n where b is the index into the batch, k is the output channel number, q is the\n input channel number, and z is the N-D spatial offset within the filter. 
Here,\n `padded_input` is obtained by zero padding the input using an effective\n spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and\n output striding `strides` as described in the\n [comment here](https://tensorflow.org/api_guides/python/nn#Convolution).\n\n In the case that `data_format` does start with `\"NC\"`, the `input` and output\n (but not the `filter`) are simply transposed as follows:\n\n convolution(input, data_format, **kwargs) =\n tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),\n **kwargs),\n [0, N+1] + range(1, N+1))\n\n It is required that 1 <= N <= 3.\n\n Args:\n input: An (N+2)-D `Tensor` of type `T`, of shape\n `[batch_size] + input_spatial_shape + [in_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, in_channels] + input_spatial_shape` if data_format starts\n with \"NC\".\n filter: An (N+2)-D `Tensor` with the same type as `input` and shape\n `spatial_filter_shape + [in_channels, out_channels]`.\n padding: A string, either `\"VALID\"` or `\"SAME\"`. The padding algorithm.\n strides: Optional. Sequence of N ints >= 1. Specifies the output stride.\n Defaults to [1]*N. If any value of strides is > 1, then all values of\n dilation_rate must be 1.\n dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter\n upsampling/input downsampling rate. In the literature, the same parameter\n is sometimes called `input stride` or `dilation`. The effective filter\n size used for the convolution will be `spatial_filter_shape +\n (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting\n (dilation_rate[i]-1) zeros between consecutive elements of the original\n filter in each spatial dimension i. If any value of dilation_rate is > 1,\n then all values of strides must be 1.\n name: Optional name for the returned tensor.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". 
For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n\n Returns:\n A `Tensor` with the same type as `input` of shape\n\n `[batch_size] + output_spatial_shape + [out_channels]`\n\n if data_format is None or does not start with \"NC\", or\n\n `[batch_size, out_channels] + output_spatial_shape`\n\n if data_format starts with \"NC\",\n where `output_spatial_shape` depends on the value of `padding`.\n\n If padding == \"SAME\":\n output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\n\n If padding == \"VALID\":\n output_spatial_shape[i] =\n ceil((input_spatial_shape[i] -\n (spatial_filter_shape[i]-1) * dilation_rate[i])\n / strides[i]).\n\n Raises:\n ValueError: If input/output depth does not match `filter` shape, if padding\n is other than `\"VALID\"` or `\"SAME\"`, or if data_format is invalid.\n\n \"\"\"\n # pylint: enable=line-too-long\n with ops.name_scope(name, \"convolution\", [input, filter]) as name:\n input = ops.convert_to_tensor(input, name=\"input\") # pylint: disable=redefined-builtin\n input_shape = input.get_shape()\n filter = ops.convert_to_tensor(filter, name=\"filter\") # pylint: disable=redefined-builtin\n filter_shape = filter.get_shape()\n op = Convolution(\n input_shape,\n filter_shape,\n padding,\n strides=strides,\n dilation_rate=dilation_rate,\n name=name,\n data_format=data_format)\n return op(input, filter)\n\n\n@tf_export(\"nn.convolution\", v1=[])\ndef convolution_v2(\n input, # pylint: disable=redefined-builtin\n filters,\n strides=None,\n padding=\"VALID\",\n data_format=None,\n dilations=None,\n name=None):\n return convolution(\n input, # pylint: disable=redefined-builtin\n filters,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n name=name,\n data_format=data_format)\n\nconvolution_v2.__doc__ = deprecation.rewrite_argument_docstring(\n deprecation.rewrite_argument_docstring(\n convolution.__doc__, \"dilation_rate\", \"dilations\"),\n \"filter\", \"filters\")\n\n\nclass Convolution(object):\n \"\"\"Helper class for convolution.\n\n Note that this class assumes that shapes of input and filter passed to\n __call__ are compatible with input_shape and filter_shape passed to the\n constructor.\n\n Arguments\n input_shape: static shape of input. i.e. input.get_shape().\n filter_shape: static shape of the filter. i.e. 
filter.get_shape().\n padding: see convolution.\n strides: see convolution.\n dilation_rate: see convolution.\n name: see convolution.\n data_format: see convolution.\n \"\"\"\n\n def __init__(self,\n input_shape,\n filter_shape,\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None):\n \"\"\"Helper function for convolution.\"\"\"\n num_total_dims = filter_shape.ndims\n if num_total_dims is None:\n num_total_dims = input_shape.ndims\n if num_total_dims is None:\n raise ValueError(\"rank of input or filter must be known\")\n\n num_spatial_dims = num_total_dims - 2\n\n try:\n input_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\n \"input tensor must have rank %d\" % (num_spatial_dims + 2))\n\n try:\n filter_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\n \"filter tensor must have rank %d\" % (num_spatial_dims + 2))\n\n if data_format is None or not data_format.startswith(\"NC\"):\n input_channels_dim = tensor_shape.dimension_at_index(\n input_shape, num_spatial_dims + 1)\n spatial_dims = range(1, num_spatial_dims + 1)\n else:\n input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)\n spatial_dims = range(2, num_spatial_dims + 2)\n\n if not input_channels_dim.is_compatible_with(\n filter_shape[num_spatial_dims]):\n raise ValueError(\n \"number of input channels does not match corresponding dimension of \"\n \"filter, {} != {}\".format(input_channels_dim,\n filter_shape[num_spatial_dims]))\n\n strides, dilation_rate = _get_strides_and_dilation_rate(\n num_spatial_dims, strides, dilation_rate)\n\n self.input_shape = input_shape\n self.filter_shape = filter_shape\n self.data_format = data_format\n self.strides = strides\n self.name = name\n self.conv_op = _WithSpaceToBatch(\n input_shape,\n dilation_rate=dilation_rate,\n padding=padding,\n build_op=self._build_op,\n filter_shape=filter_shape,\n spatial_dims=spatial_dims,\n data_format=data_format)\n\n def _build_op(self, _, padding):\n return _NonAtrousConvolution(\n self.input_shape,\n filter_shape=self.filter_shape,\n padding=padding,\n data_format=self.data_format,\n strides=self.strides,\n name=self.name)\n\n def __call__(self, inp, filter): # pylint: disable=redefined-builtin\n return self.conv_op(inp, filter)\n\n\n@tf_export(v1=[\"nn.pool\"])\ndef pool(\n input, # pylint: disable=redefined-builtin\n window_shape,\n pooling_type,\n padding,\n dilation_rate=None,\n strides=None,\n name=None,\n data_format=None):\n # pylint: disable=line-too-long\n \"\"\"Performs an N-D pooling operation.\n\n In the case that `data_format` does not start with \"NC\", computes for\n 0 <= b < batch_size,\n 0 <= x[i] < output_spatial_shape[i],\n 0 <= c < num_channels:\n\n ```\n output[b, x[0], ..., x[N-1], c] =\n REDUCE_{z[0], ..., z[N-1]}\n input[b,\n x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],\n ...\n x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],\n c],\n ```\n\n where the reduction function REDUCE depends on the value of `pooling_type`,\n and pad_before is defined based on the value of `padding` as described in\n the \"returns\" section of `tf.nn.convolution` for details.\n The reduction never includes out-of-bounds positions.\n\n In the case that `data_format` starts with `\"NC\"`, the `input` and output are\n simply transposed as follows:\n\n ```\n pool(input, data_format, **kwargs) =\n tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),\n **kwargs),\n [0, N+1] + range(1, N+1))\n ```\n\n Args:\n input: 
Tensor of rank N+2, of shape\n `[batch_size] + input_spatial_shape + [num_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, num_channels] + input_spatial_shape` if data_format starts\n with \"NC\". Pooling happens over the spatial dimensions only.\n window_shape: Sequence of N ints >= 1.\n pooling_type: Specifies pooling operation, must be \"AVG\" or \"MAX\".\n padding: The padding algorithm, must be \"SAME\" or \"VALID\".\n See the \"returns\" section of `tf.nn.convolution` for details.\n dilation_rate: Optional. Dilation rate. List of N ints >= 1.\n Defaults to [1]*N. If any value of dilation_rate is > 1, then all values\n of strides must be 1.\n strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.\n If any value of strides is > 1, then all values of dilation_rate must be\n 1.\n name: Optional. Name of the op.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n\n Returns:\n Tensor of rank N+2, of shape\n [batch_size] + output_spatial_shape + [num_channels]\n\n if data_format is None or does not start with \"NC\", or\n\n [batch_size, num_channels] + output_spatial_shape\n\n if data_format starts with \"NC\",\n where `output_spatial_shape` depends on the value of padding:\n\n If padding = \"SAME\":\n output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\n\n If padding = \"VALID\":\n output_spatial_shape[i] =\n ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])\n / strides[i]).\n\n Raises:\n ValueError: if arguments are invalid.\n\n \"\"\"\n # pylint: enable=line-too-long\n with ops.name_scope(name, \"%s_pool\" % (pooling_type.lower()),\n [input]) as scope:\n input = ops.convert_to_tensor(input, name=\"input\") # pylint: disable=redefined-builtin\n\n num_spatial_dims = len(window_shape)\n if num_spatial_dims < 1 or num_spatial_dims > 3:\n raise ValueError(\"It is required that 1 <= num_spatial_dims <= 3.\")\n\n input.get_shape().with_rank(num_spatial_dims + 2)\n\n strides, dilation_rate = _get_strides_and_dilation_rate(\n num_spatial_dims, strides, dilation_rate)\n\n if padding == \"SAME\" and np.any(dilation_rate > 1):\n raise ValueError(\n \"pooling with SAME padding is not implemented for dilation_rate > 1\")\n\n if np.any(strides > window_shape):\n raise ValueError(\n \"strides > window_shape not supported due to inconsistency between \"\n \"CPU and GPU implementations\")\n\n pooling_ops = {\n (\"MAX\", 1): max_pool,\n (\"MAX\", 2): max_pool,\n (\"MAX\", 3): max_pool3d, # pylint: disable=undefined-variable\n (\"AVG\", 1): avg_pool,\n (\"AVG\", 2): avg_pool,\n (\"AVG\", 3): avg_pool3d, # pylint: disable=undefined-variable\n }\n op_key = (pooling_type, num_spatial_dims)\n if op_key not in pooling_ops:\n raise ValueError(\"%d-D %s pooling is not supported.\" % (op_key[1],\n op_key[0]))\n\n if data_format is None or not data_format.startswith(\"NC\"):\n adjusted_window_shape = [1] + list(window_shape) + [1]\n adjusted_strides = [1] + list(strides) + [1]\n spatial_dims = range(1, num_spatial_dims + 1)\n else:\n adjusted_window_shape = [1, 1] + list(window_shape)\n adjusted_strides = [1, 1] + list(strides)\n spatial_dims = 
range(2, num_spatial_dims + 2)\n\n if num_spatial_dims == 1:\n if data_format is None or data_format == \"NWC\":\n data_format_kwargs = dict(data_format=\"NHWC\")\n elif data_format == \"NCW\":\n data_format_kwargs = dict(data_format=\"NCHW\")\n else:\n raise ValueError(\"data_format must be either \\\"NWC\\\" or \\\"NCW\\\".\")\n adjusted_window_shape = [1] + adjusted_window_shape\n adjusted_strides = [1] + adjusted_strides\n else:\n data_format_kwargs = dict(data_format=data_format)\n\n def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring\n if num_spatial_dims == 1:\n converted_input = array_ops.expand_dims(converted_input,\n spatial_dims[0])\n result = pooling_ops[op_key](\n converted_input,\n adjusted_window_shape,\n adjusted_strides,\n converted_padding,\n name=scope,\n **data_format_kwargs)\n if num_spatial_dims == 1:\n result = array_ops.squeeze(result, [spatial_dims[0]])\n return result\n\n return with_space_to_batch(\n input=input,\n dilation_rate=dilation_rate,\n padding=padding,\n op=op,\n spatial_dims=spatial_dims,\n filter_shape=window_shape)\n\n\n@tf_export(\"nn.pool\", v1=[])\ndef pool_v2(\n input, # pylint: disable=redefined-builtin\n window_shape,\n pooling_type,\n strides=None,\n padding=\"VALID\",\n data_format=None,\n dilations=None,\n name=None):\n # pylint: disable=line-too-long\n \"\"\"Performs an N-D pooling operation.\n\n In the case that `data_format` does not start with \"NC\", computes for\n 0 <= b < batch_size,\n 0 <= x[i] < output_spatial_shape[i],\n 0 <= c < num_channels:\n\n ```\n output[b, x[0], ..., x[N-1], c] =\n REDUCE_{z[0], ..., z[N-1]}\n input[b,\n x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],\n ...\n x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],\n c],\n ```\n\n where the reduction function REDUCE depends on the value of `pooling_type`,\n and pad_before is defined based on the value of `padding` as described in\n the \"returns\" section of `tf.nn.convolution` for details.\n The reduction never includes out-of-bounds positions.\n\n In the case that `data_format` starts with `\"NC\"`, the `input` and output are\n simply transposed as follows:\n\n ```\n pool(input, data_format, **kwargs) =\n tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),\n **kwargs),\n [0, N+1] + range(1, N+1))\n ```\n\n Args:\n input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +\n [num_channels]` if data_format does not start with \"NC\" (default), or\n `[batch_size, num_channels] + input_spatial_shape` if data_format starts\n with \"NC\". Pooling happens over the spatial dimensions only.\n window_shape: Sequence of N ints >= 1.\n pooling_type: Specifies pooling operation, must be \"AVG\" or \"MAX\".\n strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of\n strides is > 1, then all values of dilation_rate must be 1.\n padding: The padding algorithm, must be \"SAME\" or \"VALID\". Defaults to \"SAME\".\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\". For\n N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n dilations: Optional. Dilation rate. List of N ints >= 1. 
Defaults to\n [1]*N. If any value of dilation_rate is > 1, then all values of strides\n must be 1.\n name: Optional. Name of the op.\n\n Returns:\n Tensor of rank N+2, of shape\n [batch_size] + output_spatial_shape + [num_channels]\n\n if data_format is None or does not start with \"NC\", or\n\n [batch_size, num_channels] + output_spatial_shape\n\n if data_format starts with \"NC\",\n where `output_spatial_shape` depends on the value of padding:\n\n If padding = \"SAME\":\n output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])\n\n If padding = \"VALID\":\n output_spatial_shape[i] =\n ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])\n / strides[i]).\n\n Raises:\n ValueError: if arguments are invalid.\n\n \"\"\"\n return pool(\n input=input,\n window_shape=window_shape,\n pooling_type=pooling_type,\n padding=padding,\n dilation_rate=dilations,\n strides=strides,\n name=name,\n data_format=data_format)\n\n\n@tf_export(\"nn.atrous_conv2d\")\ndef atrous_conv2d(value, filters, rate, padding, name=None):\n \"\"\"Atrous convolution (a.k.a. convolution with holes or dilated convolution).\n\n This function is a simpler wrapper around the more general\n `tf.nn.convolution`, and exists only for backwards compatibility. You can\n use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.\n\n\n Computes a 2-D atrous convolution, also known as convolution with holes or\n dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`\n parameter is equal to one, it performs regular 2-D convolution. If the `rate`\n parameter is greater than one, it performs convolution with holes, sampling\n the input values every `rate` pixels in the `height` and `width` dimensions.\n This is equivalent to convolving the input with a set of upsampled filters,\n produced by inserting `rate - 1` zeros between two consecutive values of the\n filters along the `height` and `width` dimensions, hence the name atrous\n convolution or convolution with holes (the French word trous means holes in\n English).\n\n More specifically:\n\n ```\n output[batch, height, width, out_channel] =\n sum_{dheight, dwidth, in_channel} (\n filters[dheight, dwidth, in_channel, out_channel] *\n value[batch, height + rate*dheight, width + rate*dwidth, in_channel]\n )\n ```\n\n Atrous convolution allows us to explicitly control how densely to compute\n feature responses in fully convolutional networks. Used in conjunction with\n bilinear interpolation, it offers an alternative to `conv2d_transpose` in\n dense prediction tasks such as semantic image segmentation, optical flow\n computation, or depth estimation. It also allows us to effectively enlarge\n the field of view of filters without increasing the number of parameters or\n the amount of computation.\n\n For a description of atrous convolution and how it can be used for dense\n feature extraction, please see: [Semantic Image Segmentation with Deep\n Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).\n The same operation is investigated further in [Multi-Scale Context Aggregation\n by Dilated Convolutions](http://arxiv.org/abs/1511.07122). 
Previous works\n that effectively use atrous convolution in different ways are, among others,\n [OverFeat: Integrated Recognition, Localization and Detection using\n Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image\n Scanning with Deep Max-Pooling Convolutional Neural\n Networks](http://arxiv.org/abs/1302.1700).\n Atrous convolution is also closely related to the so-called noble identities\n in multi-rate signal processing.\n\n There are many different ways to implement atrous convolution (see the refs\n above). The implementation here reduces\n\n ```python\n atrous_conv2d(value, filters, rate, padding=padding)\n ```\n\n to the following three operations:\n\n ```python\n paddings = ...\n net = space_to_batch(value, paddings, block_size=rate)\n net = conv2d(net, filters, strides=[1, 1, 1, 1], padding=\"VALID\")\n crops = ...\n net = batch_to_space(net, crops, block_size=rate)\n ```\n\n Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`\n operations with identical `rate` parameters, 'SAME' `padding`, and filters\n with odd heights/ widths:\n\n ```python\n net = atrous_conv2d(net, filters1, rate, padding=\"SAME\")\n net = atrous_conv2d(net, filters2, rate, padding=\"SAME\")\n ...\n net = atrous_conv2d(net, filtersK, rate, padding=\"SAME\")\n ```\n\n can be equivalently performed cheaper in terms of computation and memory as:\n\n ```python\n pad = ... # padding so that the input dims are multiples of rate\n net = space_to_batch(net, paddings=pad, block_size=rate)\n net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding=\"SAME\")\n net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding=\"SAME\")\n ...\n net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding=\"SAME\")\n net = batch_to_space(net, crops=pad, block_size=rate)\n ```\n\n because a pair of consecutive `space_to_batch` and `batch_to_space` ops with\n the same `block_size` cancel out when their respective `paddings` and `crops`\n inputs are identical.\n\n Args:\n value: A 4-D `Tensor` of type `float`. It needs to be in the default \"NHWC\"\n format. Its shape is `[batch, in_height, in_width, in_channels]`.\n filters: A 4-D `Tensor` with the same type as `value` and shape\n `[filter_height, filter_width, in_channels, out_channels]`. `filters`'\n `in_channels` dimension must match that of `value`. Atrous convolution is\n equivalent to standard convolution with upsampled filters with effective\n height `filter_height + (filter_height - 1) * (rate - 1)` and effective\n width `filter_width + (filter_width - 1) * (rate - 1)`, produced by\n inserting `rate - 1` zeros along consecutive elements across the\n `filters`' spatial dimensions.\n rate: A positive int32. The stride with which we sample input values across\n the `height` and `width` dimensions. Equivalently, the rate by which we\n upsample the filter values by inserting zeros across the `height` and\n `width` dimensions. In the literature, the same parameter is sometimes\n called `input stride` or `dilation`.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n Output shape with `'VALID'` padding is:\n\n [batch, height - 2 * (filter_width - 1),\n width - 2 * (filter_height - 1), out_channels].\n\n Output shape with `'SAME'` padding is:\n\n [batch, height, width, out_channels].\n\n Raises:\n ValueError: If input/output depth does not match `filters`' shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n return convolution(\n input=value,\n filter=filters,\n padding=padding,\n dilation_rate=np.broadcast_to(rate, (2,)),\n name=name)\n\n\ndef _convert_padding(padding):\n \"\"\"Converts Python padding to C++ padding for ops which take EXPLICIT padding.\n\n Args:\n padding: the `padding` argument for a Python op which supports EXPLICIT\n padding.\n\n Returns:\n (padding, explicit_paddings) pair, which should be passed as attributes to a\n C++ op.\n\n Raises:\n ValueError: If padding is invalid.\n \"\"\"\n explicit_paddings = []\n if padding == \"EXPLICIT\":\n # Give a better error message if EXPLICIT is passed.\n raise ValueError('\"EXPLICIT\" is not a valid value for the padding '\n \"parameter. To use explicit padding, the padding \"\n \"parameter must be a list.\")\n if isinstance(padding, (list, tuple)):\n for i, dim_paddings in enumerate(padding):\n if not isinstance(dim_paddings, (list, tuple)):\n raise ValueError(\"When padding is a list, each element of padding must \"\n \"be a list/tuple of size 2. Element with index %d of \"\n \"padding is not a list/tuple\" % i)\n if len(dim_paddings) != 2:\n raise ValueError(\"When padding is a list, each element of padding must \"\n \"be a list/tuple of size 2. Element with index %d of \"\n \"padding has size %d\" % (i, len(dim_paddings)))\n explicit_paddings.extend(dim_paddings)\n if len(padding) != 4:\n raise ValueError(\"When padding is a list, it must be of size 4. Got \"\n \"padding of size: %d\" % len(padding))\n padding = \"EXPLICIT\"\n return padding, explicit_paddings\n\n\n@tf_export(\"nn.conv2d\", v1=[])\ndef conv2d_v2(input, # pylint: disable=redefined-builtin\n filters,\n strides,\n padding,\n data_format=\"NHWC\",\n dilations=None,\n name=None):\n # pylint: disable=line-too-long\n r\"\"\"Computes a 2-D convolution given 4-D `input` and `filters` tensors.\n\n Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\n and a filter / kernel tensor of shape\n `[filter_height, filter_width, in_channels, out_channels]`, this op\n performs the following:\n\n 1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n 2. Extracts image patches from the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n 3. For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\n In detail, with the default NHWC format,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n filter[di, dj, q, k]\n\n Must have `strides[0] = strides[3] = 1`. For the most common case of the same\n horizontal and vertices strides, `strides = [1, stride, stride, 1]`.\n\n Args:\n input: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n A 4-D tensor. The dimension order is interpreted according to the value\n of `data_format`, see below for details.\n filters: A `Tensor`. 
Must have the same type as `input`.\n A 4-D tensor of shape\n `[filter_height, filter_width, in_channels, out_channels]`\n strides: A list of `ints`.\n 1-D tensor of length 4. The stride of the sliding window for each\n dimension of `input`. The dimension order is determined by the value of\n `data_format`, see below for details.\n padding: Either the `string `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by the\n value of `data_format`, see above for details. Dilations in the batch and\n depth dimensions must be 1.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n # pylint: enable=line-too-long\n if dilations is None:\n dilations = [1, 1, 1, 1]\n return conv2d(input, # pylint: disable=redefined-builtin\n filters,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=data_format,\n dilations=dilations,\n name=name)\n\n\n@tf_export(v1=[\"nn.conv2d\"])\ndef conv2d( # pylint: disable=redefined-builtin,dangerous-default-value\n input,\n filter,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=None):\n r\"\"\"Computes a 2-D convolution given 4-D `input` and `filter` tensors.\n\n Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\n and a filter / kernel tensor of shape\n `[filter_height, filter_width, in_channels, out_channels]`, this op\n performs the following:\n\n 1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n 2. Extracts image patches from the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n 3. For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\n In detail, with the default NHWC format,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]\n * filter[di, dj, q, k]\n\n Must have `strides[0] = strides[3] = 1`. For the most common case of the same\n horizontal and vertices strides, `strides = [1, stride, stride, 1]`.\n\n Args:\n input: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n A 4-D tensor. The dimension order is interpreted according to the value\n of `data_format`, see below for details.\n filter: A `Tensor`. 
Must have the same type as `input`.\n A 4-D tensor of shape\n `[filter_height, filter_width, in_channels, out_channels]`\n strides: A list of `ints`.\n 1-D tensor of length 4. The stride of the sliding window for each\n dimension of `input`. The dimension order is determined by the value of\n `data_format`, see below for details.\n padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by the\n value of `data_format`, see above for details. Dilations in the batch and\n depth dimensions must be 1.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n padding, explicit_paddings = _convert_padding(padding)\n return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin\n filter,\n strides,\n padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n explicit_paddings=explicit_paddings,\n data_format=data_format,\n dilations=dilations,\n name=name)
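\n\n\n# Illustrative behaviour of the `_convert_padding` helper used above (the\n# values are made up): _convert_padding(\"SAME\") returns (\"SAME\", []), while\n# an explicit list such as [[0, 0], [1, 1], [2, 2], [0, 0]] is flattened to\n# (\"EXPLICIT\", [0, 0, 1, 1, 2, 2, 0, 0]) before being passed to the C++ op.\n\n\n@tf_export(\"nn.conv2d_backprop_filter\", v1=[])\ndef conv2d_backprop_filter_v2(input, # pylint: disable=redefined-builtin\n filter_sizes,\n out_backprop,\n strides,\n padding,\n data_format=\"NHWC\",\n dilations=None,\n name=None):\n r\"\"\"Computes the gradients of convolution with respect to the filter.\n\n Args:\n input: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n 4-D with shape `[batch, in_height, in_width, in_channels]`.\n filter_sizes: A `Tensor` of type `int32`.\n An integer vector representing the tensor shape of `filter`,\n where `filter` is a 4-D\n `[filter_height, filter_width, in_channels, out_channels]` tensor.\n out_backprop: A `Tensor`. Must have the same type as `input`.\n 4-D with shape `[batch, out_height, out_width, out_channels]`.\n Gradients w.r.t. the output of the convolution.\n strides: A list of `ints`.\n The stride of the sliding window for each dimension of the input\n of the convolution. Must be in the same order as the dimension specified\n with format.\n padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. 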
When explicit padding is used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by\n the value of `data_format`, see above for details. Dilations in the batch\n and depth dimensions must be 1.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n if dilations is None:\n dilations = [1, 1, 1, 1]\n return conv2d_backprop_filter(input, # pylint: disable=redefined-builtin\n filter_sizes,\n out_backprop,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=data_format,\n dilations=dilations,\n name=name)\n\n\n@tf_export(v1=[\"nn.conv2d_backprop_filter\"])\ndef conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value\n input,\n filter_sizes,\n out_backprop,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=None):\n r\"\"\"Computes the gradients of convolution with respect to the filter.\n\n Args:\n input: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n 4-D with shape `[batch, in_height, in_width, in_channels]`.\n filter_sizes: A `Tensor` of type `int32`.\n An integer vector representing the tensor shape of `filter`,\n where `filter` is a 4-D\n `[filter_height, filter_width, in_channels, out_channels]` tensor.\n out_backprop: A `Tensor`. Must have the same type as `input`.\n 4-D with shape `[batch, out_height, out_width, out_channels]`.\n Gradients w.r.t. the output of the convolution.\n strides: A list of `ints`.\n The stride of the sliding window for each dimension of the input\n of the convolution. Must be in the same order as the dimension specified\n with format.\n padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. 
If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by\n the value of `data_format`, see above for details. Dilations in the batch\n and depth dimensions must be 1.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n \"\"\"\n padding, explicit_paddings = _convert_padding(padding)\n return gen_nn_ops.conv2d_backprop_filter(\n input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,\n explicit_paddings, data_format, dilations, name)\n\n\n@tf_export(\"nn.conv2d_backprop_input\", v1=[])\ndef conv2d_backprop_input_v2(input_sizes,\n filters,\n out_backprop,\n strides,\n padding,\n data_format=\"NHWC\",\n dilations=None,\n name=None):\n r\"\"\"Computes the gradients of convolution with respect to the input.\n\n Args:\n input_sizes: A `Tensor` of type `int32`.\n An integer vector representing the shape of `input`,\n where `input` is a 4-D `[batch, height, width, channels]` tensor.\n filters: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n 4-D with shape\n `[filter_height, filter_width, in_channels, out_channels]`.\n out_backprop: A `Tensor`. Must have the same type as `filters`.\n 4-D with shape `[batch, out_height, out_width, out_channels]`.\n Gradients w.r.t. the output of the convolution.\n strides: A list of `ints`.\n The stride of the sliding window for each dimension of the input\n of the convolution. Must be in the same order as the dimension specified\n with format.\n padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by\n the value of `data_format`, see above for details. Dilations in the batch\n and depth dimensions must be 1.\n name: A name for the operation (optional).
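\n\n For example, an illustrative sketch (shapes here are arbitrary):\n\n filters = tf.ones([3, 3, 16, 32])\n dy = tf.ones([1, 8, 8, 32]) # gradient flowing back from the conv output\n dx = tf.nn.conv2d_backprop_input([1, 8, 8, 16], filters, dy,\n strides=[1, 1, 1, 1], padding=\"SAME\")\n # dx has shape [1, 8, 8, 16], matching `input_sizes`.\n\n Returns:\n A `Tensor`. 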
Has the same type as `filters`.\n \"\"\"\n if dilations is None:\n dilations = [1, 1, 1, 1]\n return conv2d_backprop_input(input_sizes,\n filters,\n out_backprop,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=data_format,\n dilations=dilations,\n name=name)\n\n\n@tf_export(v1=[\"nn.conv2d_backprop_input\"])\ndef conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value\n input_sizes,\n filter,\n out_backprop,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=\"NHWC\",\n dilations=[1, 1, 1, 1],\n name=None):\n r\"\"\"Computes the gradients of convolution with respect to the input.\n\n Args:\n input_sizes: A `Tensor` of type `int32`.\n An integer vector representing the shape of `input`,\n where `input` is a 4-D `[batch, height, width, channels]` tensor.\n filter: A `Tensor`. Must be one of the following types:\n `half`, `bfloat16`, `float32`, `float64`.\n 4-D with shape\n `[filter_height, filter_width, in_channels, out_channels]`.\n out_backprop: A `Tensor`. Must have the same type as `filter`.\n 4-D with shape `[batch, out_height, out_width, out_channels]`.\n Gradients w.r.t. the output of the convolution.\n strides: A list of `ints`.\n The stride of the sliding window for each dimension of the input\n of the convolution. Must be in the same order as the dimension specified\n with format.\n padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n padding algorithm to use, or a list indicating the explicit paddings at\n the start and end of each dimension. When explicit padding is used and\n data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used\n and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\"`.\n Defaults to `\"NHWC\"`.\n Specify the data format of the input and output data. With the\n default format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\n Alternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\n dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.\n 1-D tensor of length 4. The dilation factor for each dimension of\n `input`. If set to k > 1, there will be k-1 skipped cells between each\n filter element on that dimension. The dimension order is determined by\n the value of `data_format`, see above for details. Dilations in the batch\n and depth dimensions must be 1.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `filter`.\n \"\"\"\n padding, explicit_paddings = _convert_padding(padding)\n return gen_nn_ops.conv2d_backprop_input(\n input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,\n explicit_paddings, data_format, dilations, name)\n\n\n@tf_export(v1=[\"nn.conv2d_transpose\"])\ndef conv2d_transpose(\n value,\n filter, # pylint: disable=redefined-builtin\n output_shape,\n strides,\n padding=\"SAME\",\n data_format=\"NHWC\",\n name=None):\n \"\"\"The transpose of `conv2d`.\n\n This operation is sometimes called \"deconvolution\" after [Deconvolutional\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is\n actually the transpose (gradient) of `conv2d` rather than an actual\n deconvolution.\n\n Args:\n value: A 4-D `Tensor` of type `float` and shape\n `[batch, height, width, in_channels]` for `NHWC` data format or\n `[batch, in_channels, height, width]` for `NCHW` data format.\n filter: A 4-D `Tensor` with the same type as `value` and shape\n `[height, width, output_channels, in_channels]`. `filter`'s\n `in_channels` dimension must match that of `value`.\n output_shape: A 1-D `Tensor` representing the output shape of the\n deconvolution op.\n strides: A list of ints. The stride of the sliding window for each\n dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string. 'NHWC' and 'NCHW' are supported.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filter`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.name_scope(name, \"conv2d_transpose\",\n [value, filter, output_shape]) as name:\n if data_format not in (\"NCHW\", \"NHWC\"):\n raise ValueError(\"data_format has to be either NCHW or NHWC.\")\n value = ops.convert_to_tensor(value, name=\"value\")\n filter = ops.convert_to_tensor(filter, name=\"filter\") # pylint: disable=redefined-builtin\n axis = 3 if data_format == \"NHWC\" else 1\n if not value.get_shape().dims[axis].is_compatible_with(\n filter.get_shape()[3]):\n raise ValueError(\"input channels does not match filter's input channels, \"\n \"{} != {}\".format(value.get_shape()[axis],\n filter.get_shape()[3]))\n\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):\n raise ValueError(\"output_shape must have shape (4,), got {}\".format(\n output_shape_.get_shape()))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [4] if reached this point.\n if not filter.get_shape().dims[2].is_compatible_with(\n output_shape[axis]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[axis],\n filter.get_shape()[2]))\n\n if padding != \"VALID\" and padding != \"SAME\":\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n return gen_nn_ops.conv2d_backprop_input(\n input_sizes=output_shape_,\n filter=filter,\n out_backprop=value,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\n# pylint: disable=redefined-builtin\n@tf_export(\"nn.conv2d_transpose\", v1=[])\ndef conv2d_transpose_v2(\n input,\n filters, # pylint: disable=redefined-builtin\n output_shape,\n strides,\n padding=\"SAME\",\n 
data_format=\"NHWC\",\n name=None):\n return conv2d_transpose(\n input,\n filters,\n output_shape,\n strides,\n padding=padding,\n data_format=data_format,\n name=name)\n# pylint: enable=redefined-builtin\nconv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(\n deprecation.rewrite_argument_docstring(\n conv2d_transpose.__doc__, \"filter\", \"filters\"),\n \"value\", \"input\")\n\n\n@tf_export(\"nn.atrous_conv2d_transpose\")\ndef atrous_conv2d_transpose(value,\n filters,\n output_shape,\n rate,\n padding,\n name=None):\n \"\"\"The transpose of `atrous_conv2d`.\n\n This operation is sometimes called \"deconvolution\" after [Deconvolutional\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is\n actually the transpose (gradient) of `atrous_conv2d` rather than an actual\n deconvolution.\n\n Args:\n value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`\n format. Its shape is `[batch, in_height, in_width, in_channels]`.\n filters: A 4-D `Tensor` with the same type as `value` and shape\n `[filter_height, filter_width, out_channels, in_channels]`. `filters`'\n `in_channels` dimension must match that of `value`. Atrous convolution is\n equivalent to standard convolution with upsampled filters with effective\n height `filter_height + (filter_height - 1) * (rate - 1)` and effective\n width `filter_width + (filter_width - 1) * (rate - 1)`, produced by\n inserting `rate - 1` zeros along consecutive elements across the\n `filters`' spatial dimensions.\n output_shape: A 1-D `Tensor` of shape representing the output shape of the\n deconvolution op.\n rate: A positive int32. The stride with which we sample input values across\n the `height` and `width` dimensions. Equivalently, the rate by which we\n upsample the filter values by inserting zeros across the `height` and\n `width` dimensions. In the literature, the same parameter is sometimes\n called `input stride` or `dilation`.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filters`' shape, or if\n padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less\n than one, or if the output_shape is not a tensor with 4 elements.\n \"\"\"\n with ops.name_scope(name, \"atrous_conv2d_transpose\",\n [value, filters, output_shape]) as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n filters = ops.convert_to_tensor(filters, name=\"filters\")\n if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):\n raise ValueError(\n \"value's input channels does not match filters' input channels, \"\n \"{} != {}\".format(value.get_shape()[3],\n filters.get_shape()[3]))\n if rate < 1:\n raise ValueError(\"rate {} cannot be less than one\".format(rate))\n\n if rate == 1:\n return conv2d_transpose(\n value,\n filters,\n output_shape,\n strides=[1, 1, 1, 1],\n padding=padding,\n data_format=\"NHWC\")\n\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):\n raise ValueError(\"output_shape must have shape (4,), got {}\".format(\n output_shape_.get_shape()))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [4] if reached this point.\n if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[3],\n filters.get_shape()[2]))\n\n # We have two padding contributions. The first is used for converting \"SAME\"\n # to \"VALID\". The second is required so that the height and width of the\n # zero-padded value tensor are multiples of rate.\n\n # Padding required to reduce to \"VALID\" convolution\n if padding == \"SAME\":\n # Handle filters whose shape is unknown during graph creation.\n if filters.get_shape().is_fully_defined():\n filter_shape = filters.get_shape().as_list()\n else:\n filter_shape = array_ops.shape(filters)\n filter_height, filter_width = filter_shape[0], filter_shape[1]\n\n # Spatial dimensions of the filters and the upsampled filters in which we\n # introduce (rate - 1) zeros between consecutive filter values.\n filter_height_up = filter_height + (filter_height - 1) * (rate - 1)\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n\n pad_height = filter_height_up - 1\n pad_width = filter_width_up - 1\n\n # When pad_height (pad_width) is odd, we pad more to bottom (right),\n # following the same convention as conv2d().\n pad_top = pad_height // 2\n pad_bottom = pad_height - pad_top\n pad_left = pad_width // 2\n pad_right = pad_width - pad_left\n elif padding == \"VALID\":\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n in_height = output_shape[1] + pad_top + pad_bottom\n in_width = output_shape[2] + pad_left + pad_right\n\n # More padding so that rate divides the height and width of the input.\n pad_bottom_extra = (rate - in_height % rate) % rate\n pad_right_extra = (rate - in_width % rate) % rate\n\n # The paddings argument to space_to_batch is just the extra padding\n # component.\n space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]\n\n value = array_ops.space_to_batch(\n input=value, paddings=space_to_batch_pad, block_size=rate)\n\n 
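# After space_to_batch with block_size=rate, the batch dimension has grown\n # by a factor of rate * rate while each spatial dimension has shrunk by a\n # factor of rate, so the rate-dilated convolution reduces to an ordinary\n # stride-1 VALID convolution on the rearranged tensor. `input_sizes` below\n # is the matching input shape for that equivalent convolution.\n 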
input_sizes = [\n rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,\n (in_width + pad_right_extra) // rate, output_shape[3]\n ]\n\n value = gen_nn_ops.conv2d_backprop_input(\n input_sizes=input_sizes,\n filter=filters,\n out_backprop=value,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n data_format=\"NHWC\")\n\n # The crops argument to batch_to_space includes both padding components.\n batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],\n [pad_left, pad_right + pad_right_extra]]\n\n return array_ops.batch_to_space(\n input=value, crops=batch_to_space_crop, block_size=rate)\n\n\n@tf_export(\"nn.conv3d\", v1=[])\ndef conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring\n filters,\n strides,\n padding,\n data_format=\"NDHWC\",\n dilations=None,\n name=None):\n if dilations is None:\n dilations = [1, 1, 1, 1, 1]\n return gen_nn_ops.conv3d(input, # pylint: disable=redefined-builtin\n filters,\n strides,\n padding,\n data_format=data_format,\n dilations=dilations,\n name=name)\ntf_export(v1=[\"nn.conv3d\"])(gen_nn_ops.conv3d)\nconv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(\n gen_nn_ops.conv3d.__doc__, \"filter\", \"filters\")\n\n\n@tf_export(v1=[\"nn.conv3d_transpose\"])\ndef conv3d_transpose(\n value,\n filter, # pylint: disable=redefined-builtin\n output_shape,\n strides,\n padding=\"SAME\",\n data_format=\"NDHWC\",\n name=None):\n \"\"\"The transpose of `conv3d`.\n\n This operation is sometimes called \"deconvolution\" after [Deconvolutional\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is\n actually the transpose (gradient) of `conv3d` rather than an actual\n deconvolution.\n\n Args:\n value: A 5-D `Tensor` of type `float` and shape\n `[batch, depth, height, width, in_channels]`.\n filter: A 5-D `Tensor` with the same type as `value` and shape\n `[depth, height, width, output_channels, in_channels]`. `filter`'s\n `in_channels` dimension must match that of `value`.\n output_shape: A 1-D `Tensor` representing the output shape of the\n deconvolution op.\n strides: A list of ints. The stride of the sliding window for each\n dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout\n of the input and output tensors. 
Defaults to `'NDHWC'`.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filter`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.name_scope(name, \"conv3d_transpose\",\n [value, filter, output_shape]) as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n filter = ops.convert_to_tensor(filter, name=\"filter\") # pylint: disable=redefined-builtin\n axis = 1 if data_format == \"NCDHW\" else 4\n if not value.get_shape().dims[axis].is_compatible_with(\n filter.get_shape()[4]):\n raise ValueError(\"input channels does not match filter's input channels, \"\n \"{} != {}\".format(value.get_shape()[axis],\n filter.get_shape()[4]))\n\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):\n raise ValueError(\"output_shape must have shape (5,), got {}\".format(\n output_shape_.get_shape()))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [5] if reached this point.\n if not filter.get_shape().dims[3].is_compatible_with(\n output_shape[axis]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[axis],\n filter.get_shape()[3]))\n\n if padding != \"VALID\" and padding != \"SAME\":\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n return gen_nn_ops.conv3d_backprop_input_v2(\n input_sizes=output_shape_,\n filter=filter,\n out_backprop=value,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\n# pylint: disable=redefined-builtin\n@tf_export(\"nn.conv3d_transpose\", v1=[])\ndef conv3d_transpose_v2(\n input,\n filters,\n output_shape,\n strides,\n padding=\"SAME\",\n data_format=\"NDHWC\",\n name=None):\n return conv3d_transpose(\n input,\n filters,\n output_shape,\n strides,\n padding=padding,\n data_format=data_format,\n name=name)\n# pylint: enable=redefined-builtin\nconv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(\n deprecation.rewrite_argument_docstring(\n conv3d_transpose.__doc__, \"filter\", \"filters\"),\n \"value\", \"input\")\n\n\n@tf_export(\"nn.bias_add\")\ndef bias_add(value, bias, data_format=None, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, `complex64`, or `complex128`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n data_format: A string. 
'NHWC' and 'NCHW' are supported.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.name_scope(name, \"BiasAdd\", [value, bias]) as name:\n if not context.executing_eagerly():\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)\n\n\ndef bias_add_v1(value, bias, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is a deprecated version of bias_add and will soon be removed.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, `complex64`, or `complex128`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.name_scope(name, \"BiasAddV1\", [value, bias]) as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops.bias_add_v1(value, bias, name=name)\n\n\n@tf_export(v1=[\"nn.crelu\"])\ndef crelu(features, name=None, axis=-1):\n \"\"\"Computes Concatenated ReLU.\n\n Concatenates a ReLU which selects only the positive part of the activation\n with a ReLU which selects only the *negative* part of the activation.\n Note that as a result this non-linearity doubles the depth of the activations.\n Source: [Understanding and Improving Convolutional Neural Networks via\n Concatenated Rectified Linear Units. W. Shang, et\n al.](https://arxiv.org/abs/1603.05201)\n\n Args:\n features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,\n `int16`, or `int8`.\n name: A name for the operation (optional).\n axis: The axis that the output values are concatenated along. Default is -1.\n\n Returns:\n A `Tensor` with the same type as `features`.\n \"\"\"\n with ops.name_scope(name, \"CRelu\", [features]) as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n c = array_ops.concat([features, -features], axis, name=name)\n return gen_nn_ops.relu(c)\n\n\n@tf_export(\"nn.crelu\", v1=[])\ndef crelu_v2(features, axis=-1, name=None):\n return crelu(features, name=name, axis=axis)\ncrelu_v2.__doc__ = crelu.__doc__\n\n\n@tf_export(\"nn.relu6\")\ndef relu6(features, name=None):\n \"\"\"Computes Rectified Linear 6: `min(max(features, 0), 6)`.
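\n\n For example, an illustrative sketch:\n\n x = tf.constant([-3.0, 2.0, 8.0])\n tf.nn.relu6(x) # [0.0, 2.0, 6.0]\n\n Source: [Convolutional Deep Belief Networks on CIFAR-10. 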
A.\n Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)\n\n Args:\n features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,\n `int16`, or `int8`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `features`.\n \"\"\"\n with ops.name_scope(name, \"Relu6\", [features]) as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n return gen_nn_ops.relu6(features, name=name)\n\n\n@tf_export(\"nn.leaky_relu\")\ndef leaky_relu(features, alpha=0.2, name=None):\n \"\"\"Compute the Leaky ReLU activation function.\n\n \"Rectifier Nonlinearities Improve Neural Network Acoustic Models\"\n AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013\n https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf\n\n Args:\n features: A `Tensor` representing preactivation values. Must be one of\n the following types: `float16`, `float32`, `float64`, `int32`, `int64`.\n alpha: Slope of the activation function at x < 0.\n name: A name for the operation (optional).\n\n Returns:\n The activation value.\n \"\"\"\n with ops.name_scope(name, \"LeakyRelu\", [features, alpha]) as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n if features.dtype.is_integer:\n features = math_ops.to_float(features)\n if compat.forward_compatible(2018, 11, 1):\n if isinstance(alpha, np.ndarray):\n alpha = np.asscalar(alpha)\n return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)\n alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name=\"alpha\")\n return math_ops.maximum(alpha * features, features, name=name)\n\n\ndef _flatten_outer_dims(logits):\n \"\"\"Flattens logits' outer dimensions and keep its last dimension.\"\"\"\n rank = array_ops.rank(logits)\n last_dim_size = array_ops.slice(\n array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])\n output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))\n\n # Set output shape if known.\n if not context.executing_eagerly():\n shape = logits.get_shape()\n if shape is not None and shape.dims is not None:\n shape = shape.as_list()\n product = 1\n product_valid = True\n for d in shape[:-1]:\n if d is None:\n product_valid = False\n break\n else:\n product *= d\n if product_valid:\n output_shape = [product, shape[-1]]\n output.set_shape(output_shape)\n\n return output\n\n\ndef _softmax(logits, compute_op, dim=-1, name=None):\n \"\"\"Helper function for softmax and log_softmax.\n\n It reshapes and transposes the input logits into a 2-D Tensor and then invokes\n the tf.nn._softmax or tf.nn._log_softmax function. The output would be\n transposed and reshaped back.\n\n Args:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax\n dim: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `logits`. 
Same shape as `logits`.\n Raises:\n InvalidArgumentError: if `logits` is empty or `dim` is beyond the last\n dimension of `logits`.\n \"\"\"\n\n def _swap_axis(logits, dim_index, last_index, name=None):\n \"\"\"Swaps logits' dim_index and last_index.\"\"\"\n return array_ops.transpose(\n logits,\n array_ops.concat([\n math_ops.range(dim_index), [last_index],\n math_ops.range(dim_index + 1, last_index), [dim_index]\n ], 0),\n name=name)\n\n logits = ops.convert_to_tensor(logits)\n\n # We need its original shape for shape inference.\n shape = logits.get_shape()\n is_last_dim = (dim == -1) or (dim == shape.ndims - 1)\n\n if is_last_dim:\n return compute_op(logits, name=name)\n\n dim_val = dim\n if isinstance(dim, ops.Tensor):\n dim_val = tensor_util.constant_value(dim)\n if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):\n raise errors_impl.InvalidArgumentError(\n None, None,\n \"Dimension (%d) must be in the range [%d, %d) where %d is the number of\"\n \" dimensions in the input.\" % (dim_val, -shape.ndims, shape.ndims,\n shape.ndims))\n\n # If dim is not the last dimension, we have to do a transpose so that we can\n # still perform softmax on its last dimension.\n\n # In case dim is negative (and is not last dimension -1), add shape.ndims\n ndims = array_ops.rank(logits)\n if not isinstance(dim, ops.Tensor):\n if dim < 0:\n dim += ndims\n else:\n dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)\n\n # Swap logits' dimension of dim and its last dimension.\n input_rank = array_ops.rank(logits)\n dim_axis = dim % shape.ndims\n logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))\n\n # Do the actual softmax on its last dimension.\n output = compute_op(logits)\n\n output = _swap_axis(\n output, dim_axis, math_ops.subtract(input_rank, 1), name=name)\n\n # Make shape inference work since transpose may erase its static shape.\n output.set_shape(shape)\n\n return output\n\n\n@tf_export(v1=[\"nn.softmax\", \"math.softmax\"])\[email protected]_args(None, \"dim is deprecated, use axis instead\", \"dim\")\ndef softmax(logits, axis=None, name=None, dim=None):\n \"\"\"Computes softmax activations.\n\n This function performs the equivalent of\n\n softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)\n\n Args:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n axis: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n dim: Deprecated alias for `axis`.\n\n Returns:\n A `Tensor`. Has the same type and shape as `logits`.\n\n Raises:\n InvalidArgumentError: if `logits` is empty or `axis` is beyond the last\n dimension of `logits`.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n axis = -1\n return _softmax(logits, gen_nn_ops.softmax, axis, name)\n\n\n@tf_export(\"nn.softmax\", \"math.softmax\", v1=[])\ndef softmax_v2(logits, axis=None, name=None):\n \"\"\"Computes softmax activations.\n\n This function performs the equivalent of\n\n softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
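\n\n For example, an illustrative sketch:\n\n logits = tf.constant([[2.0, 1.0, 0.1]])\n tf.nn.softmax(logits) # ~[[0.659, 0.242, 0.099]]\n\n Args:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n axis: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 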
Has the same type and shape as `logits`.\n\n Raises:\n InvalidArgumentError: if `logits` is empty or `axis` is beyond the last\n dimension of `logits`.\n \"\"\"\n if axis is None:\n axis = -1\n return _softmax(logits, gen_nn_ops.softmax, axis, name)\n\n\n@tf_export(v1=[\"nn.log_softmax\", \"math.log_softmax\"])\[email protected]_args(None, \"dim is deprecated, use axis instead\", \"dim\")\ndef log_softmax(logits, axis=None, name=None, dim=None):\n \"\"\"Computes log softmax activations.\n\n For each batch `i` and class `j` we have\n\n logsoftmax = logits - log(reduce_sum(exp(logits), axis))\n\n Args:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n axis: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n dim: Deprecated alias for `axis`.\n\n Returns:\n A `Tensor`. Has the same type as `logits`. Same shape as `logits`.\n\n Raises:\n InvalidArgumentError: if `logits` is empty or `axis` is beyond the last\n dimension of `logits`.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n if axis is None:\n axis = -1\n return _softmax(logits, gen_nn_ops.log_softmax, axis, name)\n\n\n@tf_export(\"nn.log_softmax\", \"math.log_softmax\", v1=[])\ndef log_softmax_v2(logits, axis=None, name=None):\n \"\"\"Computes log softmax activations.\n\n For each batch `i` and class `j` we have\n\n logsoftmax = logits - log(reduce_sum(exp(logits), axis))\n\n Args:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n axis: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `logits`. Same shape as `logits`.\n\n Raises:\n InvalidArgumentError: if `logits` is empty or `axis` is beyond the last\n dimension of `logits`.\n \"\"\"\n if axis is None:\n axis = -1\n return _softmax(logits, gen_nn_ops.log_softmax, axis, name)\n\n\ndef _ensure_xent_args(name, sentinel, labels, logits):\n # Make sure that all arguments were passed as named arguments.\n if sentinel is not None:\n raise ValueError(\"Only call `%s` with \"\n \"named arguments (labels=..., logits=..., ...)\" % name)\n if labels is None or logits is None:\n raise ValueError(\"Both labels and logits must be provided.\")\n\n\n@tf_export(\"nn.softmax_cross_entropy_with_logits\", v1=[])\ndef softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):\n \"\"\"Computes softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** While the classes are mutually exclusive, their probabilities\n need not be. All that is required is that each row of `labels` is\n a valid probability distribution. If they are not, the computation of the\n gradient will be incorrect.\n\n If using exclusive `labels` (wherein one and only\n one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. 
Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n A common use case is to have logits and labels of shape\n `[batch_size, num_classes]`, but higher dimensions are supported, with\n the `axis` argument specifying the class dimension.\n\n `logits` and `labels` must have the same dtype (either `float16`, `float32`,\n or `float64`).\n\n Backpropagation will happen into both `logits` and `labels`. To disallow\n backpropagation into `labels`, pass label tensors through `tf.stop_gradient`\n before feeding it to this function.\n\n **Note that to avoid confusion, it is required to pass only named arguments to\n this function.**\n\n Args:\n labels: Each vector along the class dimension should hold a valid\n probability distribution e.g. for the case in which labels are of shape\n `[batch_size, num_classes]`, each row of `labels[i]` must be a valid\n probability distribution.\n logits: Unscaled log probabilities.\n axis: The class dimension. Defaulted to -1 which is the last dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that contains the softmax cross entropy loss. Its type is the\n same as `logits` and its shape is the same as `labels` except that it does\n not have the last dimension of `labels`.\n \"\"\"\n return softmax_cross_entropy_with_logits_v2_helper(\n labels=labels, logits=logits, axis=axis, name=name)\n\n\n@tf_export(v1=[\"nn.softmax_cross_entropy_with_logits_v2\"])\n@deprecated_args(None, \"dim is deprecated, use axis instead\", \"dim\")\ndef softmax_cross_entropy_with_logits_v2_helper(\n labels, logits, axis=None, name=None, dim=None):\n \"\"\"Computes softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** While the classes are mutually exclusive, their probabilities\n need not be. All that is required is that each row of `labels` is\n a valid probability distribution. If they are not, the computation of the\n gradient will be incorrect.\n\n If using exclusive `labels` (wherein one and only\n one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n A common use case is to have logits and labels of shape\n `[batch_size, num_classes]`, but higher dimensions are supported, with\n the `axis` argument specifying the class dimension.\n\n `logits` and `labels` must have the same dtype (either `float16`, `float32`,\n or `float64`).\n\n Backpropagation will happen into both `logits` and `labels`. To disallow\n backpropagation into `labels`, pass label tensors through `tf.stop_gradient`\n before feeding it to this function.\n\n **Note that to avoid confusion, it is required to pass only named arguments to\n this function.**\n\n Args:\n labels: Each vector along the class dimension should hold a valid\n probability distribution e.g. for the case in which labels are of shape\n `[batch_size, num_classes]`, each row of `labels[i]` must be a valid\n probability distribution.\n logits: Unscaled log probabilities.\n axis: The class dimension. 
Defaulted to -1 which is the last dimension.\n name: A name for the operation (optional).\n dim: Deprecated alias for axis.\n\n Returns:\n A `Tensor` that contains the softmax cross entropy loss. Its type is the\n same as `logits` and its shape is the same as `labels` except that it does\n not have the last dimension of `labels`.\n \"\"\"\n # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This\n # could break users who call this with bad labels, but disregard the bad\n # results.\n axis = deprecated_argument_lookup(\"axis\", axis, \"dim\", dim)\n del dim\n if axis is None:\n axis = -1\n\n with ops.name_scope(name, \"softmax_cross_entropy_with_logits\",\n [logits, labels]) as name:\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n labels = ops.convert_to_tensor(labels, name=\"labels\")\n convert_to_float32 = (\n logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)\n precise_logits = math_ops.cast(\n logits, dtypes.float32) if convert_to_float32 else logits\n # labels and logits must be of the same type\n labels = math_ops.cast(labels, precise_logits.dtype)\n input_rank = array_ops.rank(precise_logits)\n # For shape inference.\n shape = logits.get_shape()\n\n # Move the dim to the end if dim is not the last dimension.\n if axis != -1:\n\n def _move_dim_to_end(tensor, dim_index, rank):\n return array_ops.transpose(\n tensor,\n array_ops.concat([\n math_ops.range(dim_index),\n math_ops.range(dim_index + 1, rank), [dim_index]\n ], 0))\n\n precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)\n labels = _move_dim_to_end(labels, axis, input_rank)\n\n input_shape = array_ops.shape(precise_logits)\n\n # Make precise_logits and labels into matrices.\n precise_logits = _flatten_outer_dims(precise_logits)\n labels = _flatten_outer_dims(labels)\n\n # Do the actual op computation.\n # The second output tensor contains the gradients. We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(\n precise_logits, labels, name=name)\n\n # The output cost shape should be the input minus axis.\n output_shape = array_ops.slice(input_shape, [0],\n [math_ops.subtract(input_rank, 1)])\n cost = array_ops.reshape(cost, output_shape)\n\n # Make shape inference work since reshape and transpose may erase its static\n # shape.\n if not context.executing_eagerly(\n ) and shape is not None and shape.dims is not None:\n shape = shape.as_list()\n del shape[axis]\n cost.set_shape(shape)\n\n if convert_to_float32:\n return math_ops.cast(cost, logits.dtype)\n else:\n return cost\n\n\n_XENT_DEPRECATION = \"\"\"\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\"\"\"\n\n\n@tf_export(v1=[\"nn.softmax_cross_entropy_with_logits\"])\[email protected](date=None, instructions=_XENT_DEPRECATION)\ndef softmax_cross_entropy_with_logits(\n _sentinel=None, # pylint: disable=invalid-name\n labels=None,\n logits=None,\n dim=-1,\n name=None):\n \"\"\"Computes softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** While the classes are mutually exclusive, their probabilities\n need not be. 
All that is required is that each row of `labels` is\n a valid probability distribution. If they are not, the computation of the\n gradient will be incorrect.\n\n If using exclusive `labels` (wherein one and only\n one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n A common use case is to have logits and labels of shape\n `[batch_size, num_classes]`, but higher dimensions are supported, with\n the `dim` argument specifying the class dimension.\n\n Backpropagation will happen only into `logits`. To calculate a cross entropy\n loss that allows backpropagation into both `logits` and `labels`, see\n `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\n **Note that to avoid confusion, it is required to pass only named arguments to\n this function.**\n\n Args:\n _sentinel: Used to prevent positional parameters. Internal, do not use.\n labels: Each vector along the class dimension should hold a valid\n probability distribution e.g. for the case in which labels are of shape\n `[batch_size, num_classes]`, each row of `labels[i]` must be a valid\n probability distribution.\n logits: Unscaled log probabilities.\n dim: The class dimension. Defaulted to -1 which is the last dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that contains the softmax cross entropy loss. Its type is the\n same as `logits` and its shape is the same as `labels` except that it does\n not have the last dimension of `labels`.\n \"\"\"\n _ensure_xent_args(\"softmax_cross_entropy_with_logits\", _sentinel, labels,\n logits)\n\n with ops.name_scope(name, \"softmax_cross_entropy_with_logits_sg\",\n [logits, labels]) as name:\n labels = array_ops.stop_gradient(labels, name=\"labels_stop_gradient\")\n\n return softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits, axis=dim, name=name)\n\n\n@tf_export(\"nn.sparse_softmax_cross_entropy_with_logits\")\ndef sparse_softmax_cross_entropy_with_logits(\n _sentinel=None, # pylint: disable=invalid-name\n labels=None,\n logits=None,\n name=None):\n \"\"\"Computes sparse softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** For this operation, the probability of a given label is considered\n exclusive. That is, soft classes are not allowed, and the `labels` vector\n must provide a single specific index for the true class for each row of\n `logits` (each minibatch entry). For soft softmax classification with\n a probability distribution for each entry, see\n `softmax_cross_entropy_with_logits_v2`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. 
Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n A common use case is to have logits of shape\n `[batch_size, num_classes]` and have labels of shape\n `[batch_size]`, but higher dimensions are supported, in which\n case the `dim`-th dimension is assumed to be of size `num_classes`.\n `logits` must have the dtype of `float16`, `float32`, or `float64`, and\n `labels` must have the dtype of `int32` or `int64`.\n\n **Note that to avoid confusion, it is required to pass only named arguments to\n this function.**\n\n Args:\n _sentinel: Used to prevent positional parameters. Internal, do not use.\n labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of\n `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`\n must be an index in `[0, num_classes)`. Other values will raise an\n exception when this op is run on CPU, and return `NaN` for corresponding\n loss and gradient rows on GPU.\n logits: Unscaled log probabilities of shape\n `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or\n `float64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of the same shape as `labels` and of the same type as `logits`\n with the softmax cross entropy loss.\n\n Raises:\n ValueError: If logits are scalars (need to have rank >= 1) or if the rank\n of the labels is not equal to the rank of the logits minus one.\n \"\"\"\n _ensure_xent_args(\"sparse_softmax_cross_entropy_with_logits\", _sentinel,\n labels, logits)\n\n # TODO(pcmurray) Raise an error when the label is not an index in\n # [0, num_classes). Note: This could break users who call this with bad\n # labels, but disregard the bad results.\n\n # Reshape logits and labels to rank 2.\n with ops.name_scope(name, \"SparseSoftmaxCrossEntropyWithLogits\",\n [labels, logits]):\n labels = ops.convert_to_tensor(labels)\n logits = ops.convert_to_tensor(logits)\n precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(\n logits.dtype) == dtypes.float16) else logits\n\n # Store label shape for result later.\n labels_static_shape = labels.get_shape()\n labels_shape = array_ops.shape(labels)\n static_shapes_fully_defined = (\n labels_static_shape.is_fully_defined() and\n logits.get_shape()[:-1].is_fully_defined())\n if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:\n raise ValueError(\n \"Logits cannot be scalars - received shape %s.\" % logits.get_shape())\n if logits.get_shape().ndims is not None and (\n labels_static_shape.ndims is not None and\n labels_static_shape.ndims != logits.get_shape().ndims - 1):\n raise ValueError(\"Rank mismatch: Rank of labels (received %s) should \"\n \"equal rank of logits minus 1 (received %s).\" %\n (labels_static_shape.ndims, logits.get_shape().ndims))\n if (static_shapes_fully_defined and\n labels_static_shape != logits.get_shape()[:-1]):\n raise ValueError(\"Shape mismatch: The shape of labels (received %s) \"\n \"should equal the shape of logits except for the last \"\n \"dimension (received %s).\" % (labels_static_shape,\n logits.get_shape()))\n # Check if no reshapes are required.\n if logits.get_shape().ndims == 2:\n cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(\n precise_logits, labels, name=name)\n if logits.dtype == dtypes.float16:\n return math_ops.cast(cost, dtypes.float16)\n else:\n return cost\n\n # Perform a check of the dynamic shapes if the static shapes are not fully\n # defined.\n shape_checks = []\n if not 
static_shapes_fully_defined:\n shape_checks.append(\n check_ops.assert_equal(\n array_ops.shape(labels),\n array_ops.shape(logits)[:-1]))\n with ops.control_dependencies(shape_checks):\n # Reshape logits to 2 dim, labels to 1 dim.\n num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]\n precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])\n labels = array_ops.reshape(labels, [-1])\n # The second output tensor contains the gradients. We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(\n precise_logits, labels, name=name)\n cost = array_ops.reshape(cost, labels_shape)\n cost.set_shape(labels_static_shape)\n if logits.dtype == dtypes.float16:\n return math_ops.cast(cost, dtypes.float16)\n else:\n return cost\n\n\n@tf_export(\"nn.avg_pool\")\ndef avg_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the average pooling on the input.\n\n Each entry in `output` is the mean of the corresponding size `ksize`\n window in `value`.\n\n Args:\n value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type\n `float32`, `float64`, `qint8`, `quint8`, or `qint32`.\n ksize: A list or tuple of 4 ints. The size of the window for each dimension\n of the input tensor.\n strides: A list or tuple of 4 ints. The stride of the sliding window for\n each dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string. 'NHWC' and 'NCHW' are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with the same type as `value`. The average pooled output tensor.\n \"\"\"\n with ops.name_scope(name, \"AvgPool\", [value]) as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops.avg_pool(\n value,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\n@tf_export(\"nn.max_pool\")\ndef max_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the max pooling on the input.\n\n Args:\n value: A 4-D `Tensor` of the format specified by `data_format`.\n ksize: A list or tuple of 4 ints. The size of the window for each dimension\n of the input tensor.\n strides: A list or tuple of 4 ints. The stride of the sliding window for\n each dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string. 
'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` of format specified by `data_format`.\n The max pooled output tensor.\n \"\"\"\n with ops.name_scope(name, \"MaxPool\", [value]) as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops.max_pool(\n value,\n ksize=ksize,\n strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\n# pylint: disable=redefined-builtin\n@tf_export(\"nn.max_pool_with_argmax\", v1=[])\ndef max_pool_with_argmax_v2(input,\n ksize,\n strides,\n padding,\n data_format=\"NHWC\",\n output_dtype=dtypes.int64,\n name=None):\n \"\"\"Performs max pooling on the input and outputs both max values and indices.\n\n The indices in `argmax` are flattened, so that a maximum value at position\n `[b, y, x, c]` becomes flattened index\n `((b * height + y) * width + x) * channels + c`.\n\n The indices returned are always in `[0, height) x [0, width)` before\n flattening, even if padding is involved and the mathematically correct answer\n is outside (either negative or too large). This is a bug, but fixing it is\n difficult to do in a safe backwards compatible way, especially due to\n flattening.\n\n Args:\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`,\n `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,\n `uint32`, `uint64`.\n 4-D with shape `[batch, height, width, channels]`. Input to pool over.\n ksize: A list of `ints` that has length `>= 4`.\n The size of the window for each dimension of the input tensor.\n strides: A list of `ints` that has length `>= 4`.\n The stride of the sliding window for each dimension of the\n input tensor.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n data_format: An optional `string`, must be set to `\"NHWC\"`. Defaults to\n `\"NHWC\"`.\n Specify the data format of the input and output data.\n output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.\n Defaults to `tf.int64`.\n The dtype of the returned argmax tensor.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (output, argmax).\n\n output: A `Tensor`. 
Has the same type as `input`.\n argmax: A `Tensor` of type `output_dtype`.\n \"\"\"\n\n if data_format != \"NHWC\":\n raise ValueError(\"Data formats other than 'NHWC' are not yet supported\")\n\n return gen_nn_ops.max_pool_with_argmax(input=input,\n ksize=ksize,\n strides=strides,\n padding=padding,\n Targmax=output_dtype,\n name=name)\n\n# pylint: enable=redefined-builtin\n\n\[email protected](\"Conv2D\", \"flops\")\ndef _calc_conv_flops(graph, node):\n \"\"\"Calculates the compute resources needed for Conv2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(\n graph, node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n filter_in_depth = int(filter_shape[2])\n output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n return ops.OpStats(\n \"flops\",\n (output_count * filter_in_depth * filter_height * filter_width * 2))\n\n\[email protected](\"DepthwiseConv2dNative\", \"flops\")\ndef _calc_depthwise_conv_flops(graph, node):\n \"\"\"Calculates the compute resources needed for DepthwiseConv2dNative.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(\n graph, node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n return ops.OpStats(\"flops\", (output_count * filter_height * filter_width * 2))\n\n\[email protected](\"BiasAdd\", \"flops\")\ndef _calc_bias_add_flops(graph, node):\n \"\"\"Calculates the computing needed for BiasAdd.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n input_count = np.prod(input_shape.as_list())\n return ops.OpStats(\"flops\", input_count)\n\n\n@tf_export(v1=[\"nn.xw_plus_b\"])\ndef xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). If not specified\n \"xw_plus_b\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.name_scope(name, \"xw_plus_b\", [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add(mm, biases, name=name)\n\n\ndef xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n This is a deprecated version of that will soon be removed.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. 
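# NumPy sketch of the xw_plus_b computation described above: matmul(x, weights)
# + biases with the typical [batch, in_units] x [in_units, out_units] shapes
# (illustrative values, not the TF op).
import numpy as np

x = np.ones((2, 3))           # batch=2, in_units=3
weights = np.ones((3, 4))     # in_units=3, out_units=4
biases = np.arange(4.0)       # out_units=4
print(x @ weights + biases)   # shape (2, 4); each row is [3., 4., 5., 6.]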
Dimensions: out_units\n name: A name for the operation (optional). If not specified\n \"xw_plus_b_v1\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.name_scope(name, \"xw_plus_b_v1\", [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add_v1(mm, biases, name=name)\n\n\ndef _get_noise_shape(x, noise_shape):\n # If noise_shape is none return immediately.\n if noise_shape is None:\n return array_ops.shape(x)\n\n try:\n # Best effort to figure out the intended shape.\n # If not possible, let the op to handle it.\n # In eager mode exception will show up.\n noise_shape_ = tensor_shape.as_shape(noise_shape)\n except (TypeError, ValueError):\n return noise_shape\n\n if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):\n new_dims = []\n for i, dim in enumerate(x.shape.dims):\n if noise_shape_.dims[i].value is None and dim.value is not None:\n new_dims.append(dim.value)\n else:\n new_dims.append(noise_shape_.dims[i].value)\n return tensor_shape.TensorShape(new_dims)\n\n return noise_shape\n\n\n@tf_export(v1=[\"nn.dropout\"])\[email protected]_args(None, \"Please use `rate` instead of `keep_prob`. \"\n \"Rate should be set to `rate = 1 - keep_prob`.\",\n \"keep_prob\")\ndef dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,\n rate=None): # pylint: disable=invalid-name\n \"\"\"Computes dropout.\n\n For each element of `x`, with probability `rate`, outputs `0`, and otherwise\n scales up the input by `1 / (1-rate)`. The scaling is such that the expected\n sum is unchanged.\n\n By default, each element is kept or dropped independently. If `noise_shape`\n is specified, it must be\n [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`\n will make independent decisions. For example, if `shape(x) = [k, l, m, n]`\n and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be\n kept independently and each row and column will be kept or not kept together.\n\n Args:\n x: A floating point tensor.\n keep_prob: (deprecated) A deprecated alias for `(1-rate)`.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed` for behavior.\n name: A name for this operation (optional).\n rate: A scalar `Tensor` with the same type as `x`. The probability that each\n element of `x` is discarded.\n\n Returns:\n A Tensor of the same shape of `x`.\n\n Raises:\n ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating\n point tensor.\n \"\"\"\n try:\n keep = 1. 
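# Hypothetical NumPy sketch of the noise_shape behaviour described above: with
# shape(x) = [k, l, m, n] and noise_shape = [k, 1, 1, n], one keep/drop draw is
# broadcast across the l and m axes, so each (batch, channel) pair drops as a
# unit. dropout_sketch is an illustrative helper, not the TF implementation.
import numpy as np

def dropout_sketch(x, rate, noise_shape, seed=0):
    rng = np.random.default_rng(seed)
    keep_prob = 1.0 - rate
    # floor(keep_prob + U[0, 1)) is 1.0 with probability keep_prob, else 0.0.
    mask = np.floor(keep_prob + rng.random(noise_shape))
    return (x / keep_prob) * mask  # broadcasting applies the shared mask

x = np.ones((2, 3, 3, 4), dtype=np.float32)
y = dropout_sketch(x, rate=0.5, noise_shape=(2, 1, 1, 4))
# Every spatial slice y[b, :, :, c] is either all 0.0 or all 2.0.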
- keep_prob if keep_prob is not None else None\n except TypeError:\n raise ValueError(\"keep_prob must be a floating point number or Tensor \"\n \"(got %r)\" % keep_prob)\n\n rate = deprecation.deprecated_argument_lookup(\n \"rate\", rate,\n \"keep_prob\", keep)\n\n if rate is None:\n raise ValueError(\"You must provide a rate to dropout.\")\n\n return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\n\n\n@tf_export(\"nn.dropout\", v1=[])\ndef dropout_v2(x, rate, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name\n \"\"\"Computes dropout.\n\n With probability `rate`, drops elements of `x`. Input that are kept are\n scaled up by `1 / (1 - rate)`, otherwise outputs `0`. The scaling is so that\n the expected sum is unchanged.\n\n By default, each element is kept or dropped independently. If `noise_shape`\n is specified, it must be\n [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`\n will make independent decisions. For example, if `shape(x) = [k, l, m, n]`\n and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be\n kept independently and each row and column will be kept or not kept together.\n\n Args:\n x: A floating point tensor.\n rate: A scalar `Tensor` with the same type as x. The probability\n that each element is dropped. For example, setting rate=0.1 would drop\n 10% of input elements.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed`\n for behavior.\n name: A name for this operation (optional).\n\n Returns:\n A Tensor of the same shape of `x`.\n\n Raises:\n ValueError: If `keep_prob` is not in `(0, 1]` or if `x` is not a floating\n point tensor.\n \"\"\"\n with ops.name_scope(name, \"dropout\", [x]) as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if not x.dtype.is_floating:\n raise ValueError(\"x has to be a floating point tensor since it's going to\"\n \" be scaled. Got a %s tensor instead.\" % x.dtype)\n if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):\n raise ValueError(\"rate must be a scalar tensor or a float in the \"\n \"range [0, 1), got %g\" % rate)\n\n # Early return if nothing needs to be dropped.\n if isinstance(rate, numbers.Real) and rate == 0:\n return x\n if context.executing_eagerly():\n if isinstance(rate, ops.EagerTensor):\n if rate.numpy() == 0:\n return x\n else:\n rate = ops.convert_to_tensor(\n rate, dtype=x.dtype, name=\"rate\")\n rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())\n\n # Do nothing if we know rate == 0\n if tensor_util.constant_value(rate) == 0:\n return x\n\n noise_shape = _get_noise_shape(x, noise_shape)\n\n keep_prob = 1 - rate\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob\n random_tensor += random_ops.random_uniform(\n noise_shape, seed=seed, dtype=x.dtype)\n # 0. if [keep_prob, 1.0) and 1. 
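# Numeric sanity check of the scaling argument above: the random_uniform +
# floor trick keeps each element with probability keep_prob and rescales by
# 1 / keep_prob, so the expected value is unchanged (pure NumPy restatement).
import numpy as np

rng = np.random.default_rng(42)
keep_prob = 0.8
random_tensor = keep_prob + rng.random(100_000)  # uniform [keep_prob, 1 + keep_prob)
binary = np.floor(random_tensor)                 # 1.0 with prob keep_prob, else 0.0
out = (np.full(100_000, 3.0) / keep_prob) * binary
print(out.mean())  # ~3.0: the expected value matches the input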
if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = math_ops.divide(x, keep_prob) * binary_tensor\n if not context.executing_eagerly():\n ret.set_shape(x.get_shape())\n return ret\n\n\n@tf_export(\"math.top_k\", \"nn.top_k\")\ndef top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin\n \"\"\"Finds values and indices of the `k` largest entries for the last dimension.\n\n If the input is a vector (rank=1), finds the `k` largest entries in the vector\n and outputs their values and indices as vectors. Thus `values[j]` is the\n `j`-th largest entry in `input`, and its index is `indices[j]`.\n\n For matrices (resp. higher rank input), computes the top `k` entries in each\n row (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\n If two elements are equal, the lower-index element appears first.\n\n Args:\n input: 1-D or higher `Tensor` with last dimension at least `k`.\n k: 0-D `int32` `Tensor`. Number of top elements to look for along the last\n dimension (along each row for matrices).\n sorted: If true the resulting `k` elements will be sorted by the values in\n descending order.\n name: Optional name for the operation.\n\n Returns:\n values: The `k` largest elements along each last dimensional slice.\n indices: The indices of `values` within the last dimension of `input`.\n \"\"\"\n return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)\n\n\ndef nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Finds values of the `n`-th order statistic for the last dmension.\n\n If the input is a vector (rank-1), finds the entries which is the nth-smallest\n value in the vector and outputs their values as scalar tensor.\n\n For matrices (resp. higher rank input), computes the entries which is the\n nth-smallest value in each row (resp. vector along the last dimension). Thus,\n\n values.shape = input.shape[:-1]\n\n Args:\n input: 1-D or higher `Tensor` with last dimension at least `n+1`.\n n: A `Tensor` of type `int32`.\n 0-D. Position of sorted vector to select along the last dimension (along\n each row for matrices). Valid range of n is `[0, input.shape[:-1])`\n reverse: An optional `bool`. Defaults to `False`.\n When set to True, find the nth-largest value in the vector and vice\n versa.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n The `n`-th order statistic along each last dimensional slice.\n \"\"\"\n return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)\n\n\n@tf_export(v1=[\"nn.fractional_max_pool\"])\[email protected](date=None, instructions=\"`seed2` and `deterministic` \"\n \"args are deprecated. Use fractional_max_pool_v2.\")\ndef fractional_max_pool(value,\n pooling_ratio,\n pseudo_random=False,\n overlapping=False,\n deterministic=False,\n seed=0,\n seed2=0,\n name=None): # pylint: disable=redefined-builtin\n r\"\"\"Performs fractional max pooling on the input.\n\n This is a deprecated version of `fractional_max_pool`.\n\n Fractional max pooling is slightly different than regular max pooling. In\n regular max pooling, you downsize an input set by taking the maximum value of\n smaller N x N subsections of the set (often 2x2), and try to reduce the set by\n a factor of N, where N is an integer. 
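# Quick check of the top_k contract documented above (eager TF2-style call,
# assuming a TensorFlow install): with ties, the lower-index element comes first.
import tensorflow as tf

values, indices = tf.math.top_k([1.0, 3.0, 3.0, 2.0], k=3, sorted=True)
print(values.numpy(), indices.numpy())  # [3. 3. 2.] [1 2 3]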
Fractional max pooling, as you might\n expect from the word \"fractional\", means that the overall reduction ratio N\n does not have to be an integer.\n\n The sizes of the pooling regions are generated randomly but are fairly\n uniform. For example, let's look at the height dimension, and the constraints\n on the list of rows that will be pool boundaries.\n\n First we define the following:\n\n 1. input_row_length : the number of rows from the input set\n 2. output_row_length : which will be smaller than the input\n 3. alpha = input_row_length / output_row_length : our reduction ratio\n 4. K = floor(alpha)\n 5. row_pooling_sequence : this is the result list of pool boundary rows\n\n Then, row_pooling_sequence should satisfy:\n\n 1. a[0] = 0 : the first value of the sequence is 0\n 2. a[end] = input_row_length : the last value of the sequence is the size\n 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size\n 4. length(row_pooling_sequence) = output_row_length+1\n\n For more details on fractional max pooling, see this paper: [Benjamin Graham,\n Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.\n pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for\n each dimension of `value`, currently only supports row and col dimension\n and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,\n 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't\n allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling\n ratio on height and width dimensions respectively.\n pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,\n generates the pooling sequence in a pseudorandom fashion, otherwise, in a\n random fashion. Check paper [Benjamin Graham, Fractional\n Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between\n pseudorandom and random.\n overlapping: An optional `bool`. Defaults to `False`. When set to `True`,\n it means when pooling, the values at the boundary of adjacent pooling\n cells are used by both cells. For example:\n `index 0 1 2 3 4`\n `value 20 5 16 3 7`\n If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used\n twice. The result would be [20, 16] for fractional max pooling.\n deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`\n instead.\n seed: An optional `int`. Defaults to `0`. If set to be non-zero, the\n random number generator is seeded by the given seed. Otherwise it is\n seeded by a random seed.\n seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,\n `col_pooling_sequence`).\n output: Output `Tensor` after fractional max pooling. Has the same type as\n `value`.\n row_pooling_sequence: A `Tensor` of type `int64`.\n col_pooling_sequence: A `Tensor` of type `int64`.\n \"\"\"\n return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic, seed, seed2,\n name)\n\n\n@tf_export(\"nn.fractional_max_pool\", v1=[])\ndef fractional_max_pool_v2(value,\n pooling_ratio,\n pseudo_random=False,\n overlapping=False,\n seed=0,\n name=None): # pylint: disable=redefined-builtin\n r\"\"\"Performs fractional max pooling on the input.\n\n Fractional max pooling is slightly different than regular max pooling. 
In\n regular max pooling, you downsize an input set by taking the maximum value of\n smaller N x N subsections of the set (often 2x2), and try to reduce the set by\n a factor of N, where N is an integer. Fractional max pooling, as you might\n expect from the word \"fractional\", means that the overall reduction ratio N\n does not have to be an integer.\n\n The sizes of the pooling regions are generated randomly but are fairly\n uniform. For example, let's look at the height dimension, and the constraints\n on the list of rows that will be pool boundaries.\n\n First we define the following:\n\n 1. input_row_length : the number of rows from the input set\n 2. output_row_length : which will be smaller than the input\n 3. alpha = input_row_length / output_row_length : our reduction ratio\n 4. K = floor(alpha)\n 5. row_pooling_sequence : this is the result list of pool boundary rows\n\n Then, row_pooling_sequence should satisfy:\n\n 1. a[0] = 0 : the first value of the sequence is 0\n 2. a[end] = input_row_length : the last value of the sequence is the size\n 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size\n 4. length(row_pooling_sequence) = output_row_length+1\n\n For more details on fractional max pooling, see this paper: [Benjamin Graham,\n Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.\n pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for\n each dimension of `value`, currently only supports row and col dimension\n and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,\n 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't\n allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling\n ratio on height and width dimensions respectively.\n pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,\n generates the pooling sequence in a pseudorandom fashion, otherwise, in a\n random fashion. Check paper [Benjamin Graham, Fractional\n Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between\n pseudorandom and random.\n overlapping: An optional `bool`. Defaults to `False`. When set to `True`,\n it means when pooling, the values at the boundary of adjacent pooling\n cells are used by both cells. For example:\n `index 0 1 2 3 4`\n `value 20 5 16 3 7`\n If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used\n twice. The result would be [20, 16] for fractional max pooling.\n seed: An optional `int`. Defaults to `0`. If set to be non-zero, the\n random number generator is seeded by the given seed. Otherwise it is\n seeded by a random seed.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,\n `col_pooling_sequence`).\n output: Output `Tensor` after fractional max pooling. 
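# Pure-Python check of the row_pooling_sequence constraints listed above, for
# a hypothetical boundary list (numbers are illustrative only).
input_row_length, output_row_length = 10, 7
alpha = input_row_length / output_row_length        # reduction ratio ~1.43
K = int(alpha)                                      # floor(alpha) == 1
row_pooling_sequence = [0, 1, 3, 4, 6, 7, 9, 10]    # one valid candidate

assert row_pooling_sequence[0] == 0                                  # constraint 1
assert row_pooling_sequence[-1] == input_row_length                  # constraint 2
assert all(K <= b - a <= K + 1 for a, b in
           zip(row_pooling_sequence, row_pooling_sequence[1:]))      # constraint 3
assert len(row_pooling_sequence) == output_row_length + 1            # constraint 4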
Has the same type as\n `value`.\n row_pooling_sequence: A `Tensor` of type `int64`.\n col_pooling_sequence: A `Tensor` of type `int64`.\n \"\"\"\n if seed == 0:\n return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic=False,\n seed=0, seed2=0, name=name)\n else:\n seed1, seed2 = random_seed.get_seed(seed)\n return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic=True,\n seed=seed1, seed2=seed2, name=name)\n\n\n@tf_export(v1=[\"nn.fractional_avg_pool\"])\[email protected](date=None, instructions=\"`seed2` and `deterministic` \"\n \"args are deprecated. Use fractional_avg_pool_v2.\")\ndef fractional_avg_pool(value,\n pooling_ratio,\n pseudo_random=False,\n overlapping=False,\n deterministic=False,\n seed=0,\n seed2=0,\n name=None): # pylint: disable=redefined-builtin\n r\"\"\"Performs fractional average pooling on the input.\n\n This is a deprecated version of `fractional_avg_pool`.\n\n Fractional average pooling is similar to Fractional max pooling in the pooling\n region generation step. The only difference is that after pooling regions are\n generated, a mean operation is performed instead of a max operation in each\n pooling region.\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.\n pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for\n each dimension of `value`, currently only supports row and col dimension\n and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,\n 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't\n allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling\n ratio on height and width dimensions respectively.\n pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,\n generates the pooling sequence in a pseudorandom fashion, otherwise, in a\n random fashion. Check paper [Benjamin Graham, Fractional\n Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between\n pseudorandom and random.\n overlapping: An optional `bool`. Defaults to `False`. When set to `True`,\n it means when pooling, the values at the boundary of adjacent pooling\n cells are used by both cells. For example:\n `index 0 1 2 3 4`\n `value 20 5 16 3 7`\n If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used\n twice. The result would be [20, 16] for fractional avg pooling.\n deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`\n instead.\n seed: An optional `int`. Defaults to `0`. If set to be non-zero, the\n random number generator is seeded by the given seed. Otherwise it is\n seeded by a random seed.\n seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,\n `col_pooling_sequence`).\n output: Output `Tensor` after fractional avg pooling. 
Has the same type as\n `value`.\n row_pooling_sequence: A `Tensor` of type `int64`.\n col_pooling_sequence: A `Tensor` of type `int64`.\n \"\"\"\n return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic, seed, seed2,\n name=name)\n\n\n@tf_export(\"nn.fractional_avg_pool\", v1=[])\ndef fractional_avg_pool_v2(value,\n pooling_ratio,\n pseudo_random=False,\n overlapping=False,\n seed=0,\n name=None): # pylint: disable=redefined-builtin\n r\"\"\"Performs fractional average pooling on the input.\n\n Fractional average pooling is similar to Fractional max pooling in the pooling\n region generation step. The only difference is that after pooling regions are\n generated, a mean operation is performed instead of a max operation in each\n pooling region.\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.\n pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for\n each dimension of `value`, currently only supports row and col dimension\n and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,\n 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't\n allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling\n ratio on height and width dimensions respectively.\n pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,\n generates the pooling sequence in a pseudorandom fashion, otherwise, in a\n random fashion. Check paper [Benjamin Graham, Fractional\n Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between\n pseudorandom and random.\n overlapping: An optional `bool`. Defaults to `False`. When set to `True`,\n it means when pooling, the values at the boundary of adjacent pooling\n cells are used by both cells. For example:\n `index 0 1 2 3 4`\n `value 20 5 16 3 7`\n If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used\n twice. The result would be [20, 16] for fractional avg pooling.\n seed: An optional `int`. Defaults to `0`. If set to be non-zero, the\n random number generator is seeded by the given seed. Otherwise it is\n seeded by a random seed.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,\n `col_pooling_sequence`).\n output: Output `Tensor` after fractional avg pooling. 
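# Pure-Python walk-through of the `overlapping=True` example repeated in the
# docstrings above: boundary index 2 (value 16) belongs to both pooling cells.
value = [20, 5, 16, 3, 7]
pooling_sequence = [0, 2, 4]
cells = [value[a:b + 1] for a, b in zip(pooling_sequence, pooling_sequence[1:])]
print([max(c) for c in cells])           # [20, 16] -- matches the max-pool docstring
print([sum(c) / len(c) for c in cells])  # the fractional_avg_pool analogue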
Has the same type as\n `value`.\n row_pooling_sequence: A `Tensor` of type `int64`.\n col_pooling_sequence: A `Tensor` of type `int64`.\n \"\"\"\n if seed == 0:\n return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic=False,\n seed=0, seed2=0, name=name)\n else:\n seed1, seed2 = random_seed.get_seed(seed)\n return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,\n overlapping, deterministic=True,\n seed=seed1, seed2=seed2, name=name)\n\n\n@tf_export(v1=[\"nn.conv1d\"])\[email protected]_arg_values(\n None,\n \"`NCHW` for data_format is deprecated, use `NCW` instead\",\n warn_once=True,\n data_format=\"NCHW\")\[email protected]_arg_values(\n None,\n \"`NHWC` for data_format is deprecated, use `NWC` instead\",\n warn_once=True,\n data_format=\"NHWC\")\ndef conv1d(value,\n filters,\n stride,\n padding,\n use_cudnn_on_gpu=None,\n data_format=None,\n name=None):\n r\"\"\"Computes a 1-D convolution given 3-D input and filter tensors.\n\n Given an input tensor of shape\n [batch, in_width, in_channels]\n if data_format is \"NWC\", or\n [batch, in_channels, in_width]\n if data_format is \"NCW\",\n and a filter / kernel tensor of shape\n [filter_width, in_channels, out_channels], this op reshapes\n the arguments to pass them to conv2d to perform the equivalent\n convolution operation.\n\n Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.\n For example, if `data_format` does not start with \"NC\", a tensor of shape\n [batch, in_width, in_channels]\n is reshaped to\n [batch, 1, in_width, in_channels],\n and the filter is reshaped to\n [1, filter_width, in_channels, out_channels].\n The result is then reshaped back to\n [batch, out_width, out_channels]\n \\(where out_width is a function of the stride and padding as in conv2d\\) and\n returned to the caller.\n\n Args:\n value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.\n filters: A 3D `Tensor`. Must have the same type as `value`.\n stride: An `integer`. The number of entries by which\n the filter is moved right at each step.\n padding: 'SAME' or 'VALID'\n use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.\n data_format: An optional `string` from `\"NWC\", \"NCW\"`. Defaults\n to `\"NWC\"`, the data is stored in the order of\n [batch, in_width, in_channels]. The `\"NCW\"` format stores\n data as [batch, in_channels, in_width].\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as input.\n\n Raises:\n ValueError: if `data_format` is invalid.\n \"\"\"\n with ops.name_scope(name, \"conv1d\", [value, filters]) as name:\n # Reshape the input tensor to [batch, 1, in_width, in_channels]\n if data_format is None or data_format == \"NHWC\" or data_format == \"NWC\":\n data_format = \"NHWC\"\n spatial_start_dim = 1\n strides = [1, 1, stride, 1]\n elif data_format == \"NCHW\" or data_format == \"NCW\":\n data_format = \"NCHW\"\n spatial_start_dim = 2\n strides = [1, 1, 1, stride]\n else:\n raise ValueError(\"data_format must be \\\"NWC\\\" or \\\"NCW\\\".\")\n value = array_ops.expand_dims(value, spatial_start_dim)\n filters = array_ops.expand_dims(filters, 0)\n result = gen_nn_ops.conv2d(\n value,\n filters,\n strides,\n padding,\n use_cudnn_on_gpu=use_cudnn_on_gpu,\n data_format=data_format)\n return array_ops.squeeze(result, [spatial_start_dim])\n\n\n@tf_export(\"nn.conv1d\", v1=[])\ndef conv1d_v2(input, # pylint: disable=redefined-builtin\n filters,\n stride,\n padding,\n data_format=None,\n name=None):\n r\"\"\"Computes a 1-D convolution given 3-D input and filter tensors.\n\n Given an input tensor of shape\n [batch, in_width, in_channels]\n if data_format is \"NWC\", or\n [batch, in_channels, in_width]\n if data_format is \"NCW\",\n and a filter / kernel tensor of shape\n [filter_width, in_channels, out_channels], this op reshapes\n the arguments to pass them to conv2d to perform the equivalent\n convolution operation.\n\n Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.\n For example, if `data_format` does not start with \"NC\", a tensor of shape\n [batch, in_width, in_channels]\n is reshaped to\n [batch, 1, in_width, in_channels],\n and the filter is reshaped to\n [1, filter_width, in_channels, out_channels].\n The result is then reshaped back to\n [batch, out_width, out_channels]\n \\(where out_width is a function of the stride and padding as in conv2d\\) and\n returned to the caller.\n\n Args:\n input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.\n filters: A 3D `Tensor`. Must have the same type as `input`.\n stride: An `integer`. The number of entries by which\n the filter is moved right at each step.\n padding: 'SAME' or 'VALID'\n data_format: An optional `string` from `\"NWC\", \"NCW\"`. Defaults\n to `\"NWC\"`, the data is stored in the order of\n [batch, in_width, in_channels]. The `\"NCW\"` format stores\n data as [batch, in_channels, in_width].\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as input.\n\n Raises:\n ValueError: if `data_format` is invalid.\n \"\"\"\n return conv1d(input, # pylint: disable=redefined-builtin\n filters,\n stride,\n padding,\n use_cudnn_on_gpu=True,\n data_format=data_format,\n name=name)\n\n\ndef conv1d_transpose(\n value,\n filter, # pylint: disable=redefined-builtin\n output_shape,\n stride,\n padding=\"SAME\",\n data_format=\"NWC\",\n name=None):\n \"\"\"The transpose of `conv1d`.\n\n This operation is sometimes called \"deconvolution\" after [Deconvolutional\n Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is\n actually the transpose (gradient) of `conv1d` rather than an actual\n deconvolution.\n\n Args:\n value: A 3-D `Tensor` of type `float` and shape\n `[batch, in_width, in_channels]` for `NWC` data format or\n `[batch, in_channels, in_width]` for `NCW` data format.\n filter: A 3-D `Tensor` with the same type as `value` and shape\n `[filter_width, output_channels, in_channels]`. 
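# NumPy shape walk-through of the conv1d -> conv2d reshaping described above
# (shapes only; sizes are illustrative assumptions).
import numpy as np

batch, in_width, in_channels, out_channels, filter_width = 2, 9, 3, 5, 4
x = np.zeros((batch, in_width, in_channels))
f = np.zeros((filter_width, in_channels, out_channels))

x4 = x[:, np.newaxis, :, :]  # expand_dims -> [batch, 1, in_width, in_channels]
f4 = f[np.newaxis, :, :, :]  # expand_dims -> [1, filter_width, in_ch, out_ch]
print(x4.shape, f4.shape)    # (2, 1, 9, 3) (1, 4, 3, 5)
# conv2d over x4/f4 with strides [1, 1, stride, 1], then squeezing axis 1,
# yields [batch, out_width, out_channels] -- the conv1d result.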
`filter`'s\n `in_channels` dimension must match that of `value`.\n output_shape: A 1-D `Tensor` representing the output shape of the\n deconvolution op.\n stride: An `integer`. The number of entries by which\n the filter is moved right at each step.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n See the \"returns\" section of `tf.nn.convolution` for details.\n data_format: A string. 'NHWC' and 'NCHW' are supported.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filter`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.name_scope(name, \"conv1d_transpose\",\n [value, filter, output_shape]) as name:\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):\n raise ValueError(\"output_shape must have shape (3,), got {}\".format(\n output_shape_.get_shape()))\n\n # The format could be either NWC or NCW, map to NHWC or NCHW\n if data_format is None or data_format == \"NWC\":\n data_format_2d = \"NHWC\"\n axis = 2\n elif data_format == \"NCW\":\n data_format_2d = \"NCHW\"\n axis = 1\n else:\n raise ValueError(\"data_format must be \\\"NWC\\\" or \\\"NCW\\\".\")\n\n if not value.get_shape().dims[axis].is_compatible_with(\n filter.get_shape()[2]):\n raise ValueError(\"input channels does not match filter's input channels, \"\n \"{} != {}\".format(value.get_shape()[axis],\n filter.get_shape()[2]))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [3] if reached this point.\n if not filter.get_shape().dims[1].is_compatible_with(\n output_shape[axis]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[axis],\n filter.get_shape()[1]))\n\n if padding != \"VALID\" and padding != \"SAME\":\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n # Reshape the input tensor to [batch, 1, in_width, in_channels]\n if data_format_2d == \"NHWC\":\n output_shape_ = array_ops.concat(\n [output_shape_[:1], [1], output_shape_[1:]], axis=0)\n spatial_start_dim = 1\n strides = [1, 1, stride, 1]\n else:\n output_shape_ = array_ops.concat(\n [output_shape_[:2], [1], output_shape_[2:]], axis=0)\n spatial_start_dim = 2\n strides = [1, 1, 1, stride]\n value = array_ops.expand_dims(value, spatial_start_dim)\n filter = array_ops.expand_dims(filter, 0) # pylint: disable=redefined-builtin\n\n result = gen_nn_ops.conv2d_backprop_input(\n input_sizes=output_shape_,\n filter=filter,\n out_backprop=value,\n strides=strides,\n padding=padding,\n data_format=data_format_2d,\n name=name)\n return array_ops.squeeze(result, [spatial_start_dim])\n\n\[email protected](\"Dilation2D\", \"flops\")\ndef _calc_dilation2d_flops(graph, node):\n \"\"\"Calculates the compute resources needed for Dilation2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(\n graph, node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n return 
ops.OpStats(\"flops\", (output_count * filter_height * filter_width * 2))\n\n\n@tf_export(v1=[\"nn.erosion2d\"])\ndef erosion2d(value, kernel, strides, rates, padding, name=None):\n \"\"\"Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.\n\n The `value` tensor has shape `[batch, in_height, in_width, depth]` and the\n `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,\n each input channel is processed independently of the others with its own\n structuring function. The `output` tensor has shape\n `[batch, out_height, out_width, depth]`. The spatial dimensions of the\n output tensor depend on the `padding` algorithm. We currently only support the\n default \"NHWC\" `data_format`.\n\n In detail, the grayscale morphological 2-D erosion is given by:\n\n output[b, y, x, c] =\n min_{dy, dx} value[b,\n strides[1] * y - rates[1] * dy,\n strides[2] * x - rates[2] * dx,\n c] -\n kernel[dy, dx, c]\n\n Duality: The erosion of `value` by the `kernel` is equal to the negation of\n the dilation of `-value` by the reflected `kernel`.\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.\n kernel: A `Tensor`. Must have the same type as `value`.\n 3-D with shape `[kernel_height, kernel_width, depth]`.\n strides: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The stride of the sliding window for each dimension of\n the input tensor. Must be: `[1, stride_height, stride_width, 1]`.\n rates: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The input stride for atrous morphological dilation.\n Must be: `[1, rate_height, rate_width, 1]`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n name: A name for the operation (optional). If not specified \"erosion2d\"\n is used.\n\n Returns:\n A `Tensor`. Has the same type as `value`.\n 4-D with shape `[batch, out_height, out_width, depth]`.\n\n Raises:\n ValueError: If the `value` depth does not match `kernel`' shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.name_scope(name, \"erosion2d\", [value, kernel]) as name:\n # Reduce erosion to dilation by duality.\n return math_ops.negative(\n gen_nn_ops.dilation2d(\n input=math_ops.negative(value),\n filter=array_ops.reverse_v2(kernel, [0, 1]),\n strides=strides,\n rates=rates,\n padding=padding,\n name=name))\n\n\n@tf_export(\"nn.erosion2d\", v1=[])\ndef erosion2d_v2(value,\n filters,\n strides,\n padding,\n data_format,\n dilations,\n name=None):\n \"\"\"Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.\n\n The `value` tensor has shape `[batch, in_height, in_width, depth]` and the\n `filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,\n each input channel is processed independently of the others with its own\n structuring function. The `output` tensor has shape\n `[batch, out_height, out_width, depth]`. The spatial dimensions of the\n output tensor depend on the `padding` algorithm. We currently only support the\n default \"NHWC\" `data_format`.\n\n In detail, the grayscale morphological 2-D erosion is given by:\n\n output[b, y, x, c] =\n min_{dy, dx} value[b,\n strides[1] * y - dilations[1] * dy,\n strides[2] * x - dilations[2] * dx,\n c] -\n filters[dy, dx, c]\n\n Duality: The erosion of `value` by the `filters` is equal to the negation of\n the dilation of `-value` by the reflected `filters`.\n\n Args:\n value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.\n filters: A `Tensor`. 
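# 1-D NumPy sketch of the erosion/dilation duality used above:
# erosion(value, kernel) == -dilation(-value, reverse(kernel)).
# Valid padding, stride 1; a restatement of the identity, not the TF kernels.
import numpy as np

def dilation1d(v, k):
    m = len(k)
    return np.array([max(v[t + j] + k[j] for j in range(m))
                     for t in range(len(v) - m + 1)])

def erosion1d(v, k):
    m = len(k)
    return np.array([min(v[i - j] - k[j] for j in range(m))
                     for i in range(m - 1, len(v))])

v = np.array([3.0, 1.0, 4.0, 1.0, 5.0])
k = np.array([0.0, 1.0])
assert np.allclose(erosion1d(v, k), -dilation1d(-v, k[::-1]))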
Must have the same type as `value`.\n 3-D with shape `[filters_height, filters_width, depth]`.\n strides: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The stride of the sliding window for each dimension of\n the input tensor. Must be: `[1, stride_height, stride_width, 1]`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n data_format: A `string`, only `\"NHWC\"` is currently supported.\n dilations: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The input stride for atrous morphological dilation.\n Must be: `[1, rate_height, rate_width, 1]`.\n name: A name for the operation (optional). If not specified \"erosion2d\"\n is used.\n\n Returns:\n A `Tensor`. Has the same type as `value`.\n 4-D with shape `[batch, out_height, out_width, depth]`.\n\n Raises:\n ValueError: If the `value` depth does not match `filters`' shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n if data_format != \"NHWC\":\n raise ValueError(\"Data formats other than NHWC are not yet supported\")\n\n with ops.name_scope(name, \"erosion2d\", [value, filters]) as name:\n # Reduce erosion to dilation by duality.\n return math_ops.negative(\n gen_nn_ops.dilation2d(\n input=math_ops.negative(value),\n filter=array_ops.reverse_v2(filters, [0, 1]),\n strides=strides,\n rates=dilations,\n padding=padding,\n name=name))\n\n\n@tf_export(v1=[\"math.in_top_k\", \"nn.in_top_k\"])\ndef in_top_k(predictions, targets, k, name=None):\n r\"\"\"Says whether the targets are in the top `K` predictions.\n\n This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\n prediction for the target class is among the top `k` predictions among\n all predictions for example `i`. Note that the behavior of `InTopK` differs\n from the `TopK` op in its handling of ties; if multiple classes have the\n same prediction value and straddle the top-`k` boundary, all of those\n classes are considered to be in the top `k`.\n\n More formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n $$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$\n\n Args:\n predictions: A `Tensor` of type `float32`.\n A `batch_size` x `classes` tensor.\n targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n A `batch_size` vector of class ids.\n k: An `int`. Number of top elements to look at for computing precision.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.\n \"\"\"\n with ops.name_scope(name, \"in_top_k\"):\n return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)\n\n\n@tf_export(\"math.in_top_k\", \"nn.in_top_k\", v1=[])\ndef in_top_k_v2(targets, predictions, k, name=None):\n return in_top_k(predictions, targets, k, name)\n\n\nin_top_k_v2.__doc__ = in_top_k.__doc__\n\n\ntf_export(v1=[\"nn.quantized_avg_pool\"])(gen_nn_ops.quantized_avg_pool)\ntf_export(v1=[\"nn.quantized_conv2d\"])(gen_nn_ops.quantized_conv2d)\ntf_export(v1=[\"nn.quantized_relu_x\"])(gen_nn_ops.quantized_relu_x)\ntf_export(v1=[\"nn.quantized_max_pool\"])(gen_nn_ops.quantized_max_pool)\n"
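# Sketch of the in_top_k tie handling documented above: classes 1 and 2 tie at
# the top-2 boundary, so both count as in the top k (eager TF2-style call;
# note tf.math.in_top_k takes targets first, as in in_top_k_v2 above).
import tensorflow as tf

predictions = tf.constant([[0.1, 0.5, 0.5, 0.2],
                           [0.1, 0.5, 0.5, 0.2]])
targets = tf.constant([2, 3])
print(tf.math.in_top_k(targets, predictions, k=2).numpy())  # [ True False]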
] | [
[
"numpy.any",
"tensorflow.python.ops.gen_nn_ops.conv3d_backprop_input_v2",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.math_ops.negative",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.ops.gen_nn_ops.fractional_max_pool",
"tensorflow.python.ops.gen_nn_ops.conv2d",
"tensorflow.python.ops.array_ops.batch_to_space",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.batch_to_space_nd",
"tensorflow.python.ops.gen_nn_ops.conv3d",
"numpy.array",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gen_nn_ops.relu",
"tensorflow.python.ops.gen_nn_ops.bias_add",
"tensorflow.python.util.deprecation.deprecated_argument_lookup",
"tensorflow.python.ops.array_ops.space_to_batch_nd",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_nn_ops.softmax_cross_entropy_with_logits",
"tensorflow.python.framework.ops.RegisterStatistics",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.gen_nn_ops.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.ops.gen_nn_ops.in_top_kv2",
"tensorflow.python.ops.math_ops.divide",
"tensorflow.python.ops.gen_nn_ops.max_pool_with_argmax",
"tensorflow.python.ops.gen_nn_ops.avg_pool",
"tensorflow.python.framework.tensor_shape.vector",
"numpy.concatenate",
"tensorflow.python.framework.tensor_shape.dimension_at_index",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.array_ops.reverse_v2",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.gen_nn_ops.conv2d_backprop_filter",
"numpy.asscalar",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.framework.ops.OpStats",
"tensorflow.python.ops.gen_nn_ops.conv2d_backprop_input",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.random_ops.random_uniform",
"numpy.zeros",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.ops.gen_nn_ops.max_pool",
"tensorflow.python.framework.errors_impl.InvalidArgumentError",
"tensorflow.python.framework.graph_util.tensor_shape_from_node_def_name",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.gen_nn_ops.relu6",
"tensorflow.python.ops.array_ops.required_space_to_batch_paddings",
"tensorflow.python.util.deprecation.deprecated_arg_values",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.gen_nn_ops.leaky_relu",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.python.ops.gen_nn_ops.dilation2d",
"tensorflow.python.ops.gen_nn_ops.bias_add_v1",
"tensorflow.python.ops.math_ops.less",
"numpy.all",
"tensorflow.python.ops.gen_nn_ops.fractional_avg_pool",
"tensorflow.python.ops.gen_nn_ops.top_kv2",
"numpy.broadcast_to",
"tensorflow.python.ops.array_ops.space_to_batch",
"tensorflow.python.ops.gen_nn_ops.nth_element",
"tensorflow.python.util.deprecation.rewrite_argument_docstring",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.framework.random_seed.get_seed",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.stack",
"numpy.full"
]
] |
cgangwar11/pandas | [
"972f491cb7fdcc3c1c2cb9f05644128f13457f87"
] | [
"pandas/core/indexes/datetimes.py"
] | [
"from datetime import date, datetime, time, timedelta, tzinfo\nimport operator\nfrom typing import Optional\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, Period, Timestamp, index as libindex, lib\nfrom pandas._libs.tslibs import (\n Resolution,\n ints_to_pydatetime,\n parsing,\n timezones,\n to_offset,\n)\nfrom pandas._libs.tslibs.offsets import prefix_mapping\nfrom pandas._typing import DtypeObj, Label\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import cache_readonly, doc\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_float,\n is_integer,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype\n\nfrom pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import Index, maybe_extract_name\nfrom pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin\nfrom pandas.core.indexes.extension import inherit_names\nfrom pandas.core.tools.times import to_time\n\n\ndef _new_DatetimeIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__\n \"\"\"\n if \"data\" in d and not isinstance(d[\"data\"], DatetimeIndex):\n # Avoid need to verify integrity by calling simple_new directly\n data = d.pop(\"data\")\n if not isinstance(data, DatetimeArray):\n # For backward compat with older pickles, we may need to construct\n # a DatetimeArray to adapt to the newer _simple_new signature\n tz = d.pop(\"tz\")\n freq = d.pop(\"freq\")\n dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)\n else:\n dta = data\n for key in [\"tz\", \"freq\"]:\n # These are already stored in our DatetimeArray; if they are\n # also in the pickle and don't match, we have a problem.\n if key in d:\n assert d.pop(key) == getattr(dta, key)\n result = cls._simple_new(dta, **d)\n else:\n with warnings.catch_warnings():\n # TODO: If we knew what was going in to **d, we might be able to\n # go through _simple_new instead\n warnings.simplefilter(\"ignore\")\n result = cls.__new__(cls, **d)\n\n return result\n\n\n@inherit_names(\n [\"to_perioddelta\", \"to_julian_date\", \"strftime\", \"isocalendar\"]\n + DatetimeArray._field_ops\n + [\n method\n for method in DatetimeArray._datetimelike_methods\n if method not in (\"tz_localize\",)\n ],\n DatetimeArray,\n wrap=True,\n)\n@inherit_names([\"is_normalized\", \"_resolution_obj\"], DatetimeArray, cache=True)\n@inherit_names(\n [\n \"_bool_ops\",\n \"_object_ops\",\n \"_field_ops\",\n \"_datetimelike_ops\",\n \"_datetimelike_methods\",\n \"tz\",\n \"tzinfo\",\n \"dtype\",\n \"to_pydatetime\",\n \"_has_same_tz\",\n \"_format_native_types\",\n \"date\",\n \"time\",\n \"timetz\",\n ]\n + DatetimeArray._bool_ops,\n DatetimeArray,\n)\nclass DatetimeIndex(DatetimeTimedeltaMixin):\n \"\"\"\n Immutable ndarray-like of datetime64 data.\n\n Represented internally as int64, and which can be boxed to Timestamp objects\n that are subclasses of datetime and carry metadata.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional datetime-like data to construct index with.\n freq : str or pandas offset object, optional\n One of pandas date offset strings or corresponding objects. 
The string\n 'infer' can be passed in order to set the frequency of the index as the\n inferred frequency upon creation.\n tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str\n Set the Timezone of the data.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n closed : {'left', 'right'}, optional\n Set whether to include `start` and `end` that are on the\n boundary. The default includes boundary points on either end.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from 03:00\n DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC\n and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter\n dictates how ambiguous times should be handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for ambiguous\n times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.\n dayfirst : bool, default False\n If True, parse dates in `data` with the day first order.\n yearfirst : bool, default False\n If True parse dates in `data` with the year first order.\n dtype : numpy.dtype or DatetimeTZDtype or str, default None\n Note that the only NumPy dtype allowed is ‘datetime64[ns]’.\n copy : bool, default False\n Make a copy of input ndarray.\n name : label, default None\n Name to be stored in the index.\n\n Attributes\n ----------\n year\n month\n day\n hour\n minute\n second\n microsecond\n nanosecond\n date\n time\n timetz\n dayofyear\n weekofyear\n week\n dayofweek\n weekday\n quarter\n tz\n freq\n freqstr\n is_month_start\n is_month_end\n is_quarter_start\n is_quarter_end\n is_year_start\n is_year_end\n is_leap_year\n inferred_freq\n\n Methods\n -------\n normalize\n strftime\n snap\n tz_convert\n tz_localize\n round\n floor\n ceil\n to_period\n to_perioddelta\n to_pydatetime\n to_series\n to_frame\n month_name\n day_name\n mean\n\n See Also\n --------\n Index : The base pandas Index type.\n TimedeltaIndex : Index of timedelta64 data.\n PeriodIndex : Index of Period data.\n to_datetime : Convert argument to datetime.\n date_range : Create a fixed-frequency DatetimeIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n \"\"\"\n\n _typ = \"datetimeindex\"\n\n _engine_type = libindex.DatetimeEngine\n _supports_partial_string_indexing = True\n\n _comparables = [\"name\", \"freqstr\", \"tz\"]\n _attributes = [\"name\", \"tz\", \"freq\"]\n\n _is_numeric_dtype = False\n\n _data: DatetimeArray\n tz: Optional[tzinfo]\n\n # --------------------------------------------------------------------\n # methods that dispatch to array and wrap result in DatetimeIndex\n\n @doc(DatetimeArray.tz_localize)\n def tz_localize(\n self, tz, ambiguous=\"raise\", nonexistent=\"raise\"\n ) -> \"DatetimeIndex\":\n arr = self._data.tz_localize(tz, ambiguous, nonexistent)\n return type(self)._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.to_period)\n def to_period(self, freq=None) -> \"DatetimeIndex\":\n arr = self._data.to_period(freq)\n return type(self)._simple_new(arr, name=self.name)\n\n # 
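# Brief usage sketch of the DatetimeIndex constructor documented above
# (standard pandas API; dates are illustrative).
import pandas as pd

idx = pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-03"], freq="infer")
print(idx.freqstr)                # 'D' -- freq='infer' infers it from the data
print(idx.tz_localize("UTC").tz)  # UTC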
--------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n freq=lib.no_default,\n tz=None,\n normalize=False,\n closed=None,\n ambiguous=\"raise\",\n dayfirst=False,\n yearfirst=False,\n dtype=None,\n copy=False,\n name=None,\n ):\n\n if is_scalar(data):\n raise TypeError(\n f\"{cls.__name__}() must be called with a \"\n f\"collection of some kind, {repr(data)} was passed\"\n )\n\n # - Cases checked above all return/raise before reaching here - #\n\n name = maybe_extract_name(name, data, cls)\n\n dtarr = DatetimeArray._from_sequence(\n data,\n dtype=dtype,\n copy=copy,\n tz=tz,\n freq=freq,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n ambiguous=ambiguous,\n )\n\n subarr = cls._simple_new(dtarr, name=name)\n return subarr\n\n @classmethod\n def _simple_new(cls, values: DatetimeArray, name: Label = None):\n assert isinstance(values, DatetimeArray), type(values)\n\n result = object.__new__(cls)\n result._data = values\n result.name = name\n result._cache = {}\n result._no_setting_name = False\n # For groupby perf. See note in indexes/base about _index_data\n result._index_data = values._data\n result._reset_identity()\n return result\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def _is_dates_only(self) -> bool:\n \"\"\"\n Return a boolean if we are only dates (and don't have a timezone)\n\n Returns\n -------\n bool\n \"\"\"\n from pandas.io.formats.format import _is_dates_only\n\n return self.tz is None and _is_dates_only(self._values)\n\n def __reduce__(self):\n\n # we use a special reduce here because we need\n # to simply set the .tz (and not reinterpret it)\n\n d = dict(data=self._data)\n d.update(self._get_attributes_dict())\n return _new_DatetimeIndex, (type(self), d), None\n\n def _convert_for_op(self, value):\n \"\"\"\n Convert value to be insertable to ndarray.\n \"\"\"\n if self._has_same_tz(value):\n return Timestamp(value).asm8\n raise ValueError(\"Passed item and index have different timezone\")\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n if not is_datetime64_any_dtype(dtype):\n return False\n if self.tz is not None:\n # If we have tz, we can compare to tzaware\n return is_datetime64tz_dtype(dtype)\n # if we dont have tz, we can only compare to tznaive\n return is_datetime64_dtype(dtype)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return ints_to_pydatetime(self.asi8, self.tz)\n\n @property\n def _formatter_func(self):\n from pandas.io.formats.format import _get_format_datetime64\n\n formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)\n return lambda x: f\"'{formatter(x, tz=self.tz)}'\"\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def union_many(self, others):\n \"\"\"\n A bit of a hack to accelerate unioning a collection of indexes.\n \"\"\"\n this = self\n\n for other in others:\n if not isinstance(this, DatetimeIndex):\n this = Index.union(this, other)\n continue\n\n if not isinstance(other, DatetimeIndex):\n try:\n other = DatetimeIndex(other)\n except TypeError:\n pass\n\n this, other = this._maybe_utc_convert(other)\n\n if this._can_fast_union(other):\n this = this._fast_union(other)\n else:\n this = Index.union(this, other)\n return this\n\n # 
--------------------------------------------------------------------\n\n def _get_time_micros(self):\n \"\"\"\n Return the number of microseconds since midnight.\n\n Returns\n -------\n ndarray[int64_t]\n \"\"\"\n values = self.asi8\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._data._local_timestamps()\n\n nanos = values % (24 * 3600 * 1_000_000_000)\n micros = nanos // 1000\n\n micros[self._isnan] = -1\n return micros\n\n def to_series(self, keep_tz=lib.no_default, index=None, name=None):\n \"\"\"\n Create a Series with both index and values equal to the index keys\n useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n keep_tz : optional, defaults True\n Return the data keeping the timezone.\n\n If keep_tz is True:\n\n If the timezone is not set, the resulting\n Series will have a datetime64[ns] dtype.\n\n Otherwise the Series will have an datetime64[ns, tz] dtype; the\n tz will be preserved.\n\n If keep_tz is False:\n\n Series will have a datetime64[ns] dtype. TZ aware\n objects will have the tz removed.\n\n .. versionchanged:: 1.0.0\n The default value is now True. In a future version,\n this keyword will be removed entirely. Stop passing the\n argument to obtain the future behavior and silence the warning.\n\n index : Index, optional\n Index of resulting Series. If None, defaults to original index.\n name : str, optional\n Name of resulting Series. If None, defaults to name of original\n index.\n\n Returns\n -------\n Series\n \"\"\"\n from pandas import Series\n\n if index is None:\n index = self._shallow_copy()\n if name is None:\n name = self.name\n\n if keep_tz is not lib.no_default:\n if keep_tz:\n warnings.warn(\n \"The 'keep_tz' keyword in DatetimeIndex.to_series \"\n \"is deprecated and will be removed in a future version. \"\n \"You can stop passing 'keep_tz' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n warnings.warn(\n \"Specifying 'keep_tz=False' is deprecated and this \"\n \"option will be removed in a future release. 
If \"\n \"you want to remove the timezone information, you \"\n \"can do 'idx.tz_convert(None)' before calling \"\n \"'to_series'.\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n keep_tz = True\n\n if keep_tz and self.tz is not None:\n # preserve the tz & copy\n values = self.copy(deep=True)\n else:\n values = self._values.view(\"M8[ns]\").copy()\n\n return Series(values, index=index, name=name)\n\n def snap(self, freq=\"S\"):\n \"\"\"\n Snap time stamps to nearest occurring frequency.\n\n Returns\n -------\n DatetimeIndex\n \"\"\"\n # Superdumb, punting on any optimizing\n freq = to_offset(freq)\n\n snapped = np.empty(len(self), dtype=DT64NS_DTYPE)\n\n for i, v in enumerate(self):\n s = v\n if not freq.is_on_offset(s):\n t0 = freq.rollback(s)\n t1 = freq.rollforward(s)\n if abs(s - t0) < abs(t1 - s):\n s = t0\n else:\n s = t1\n snapped[i] = s\n\n dta = DatetimeArray(snapped, dtype=self.dtype)\n return DatetimeIndex._simple_new(dta, name=self.name)\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n \"\"\"\n Calculate datetime bounds for parsed time string and its resolution.\n\n Parameters\n ----------\n reso : str\n Resolution provided by parsed string.\n parsed : datetime\n Datetime from parsed string.\n\n Returns\n -------\n lower, upper: pd.Timestamp\n \"\"\"\n assert isinstance(reso, Resolution), (type(reso), reso)\n valid_resos = {\n \"year\",\n \"month\",\n \"quarter\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"minute\",\n \"second\",\n \"microsecond\",\n }\n if reso.attrname not in valid_resos:\n raise KeyError\n\n grp = reso.freq_group\n per = Period(parsed, freq=grp)\n start, end = per.start_time, per.end_time\n\n # GH 24076\n # If an incoming date string contained a UTC offset, need to localize\n # the parsed date to this offset first before aligning with the index's\n # timezone\n if parsed.tzinfo is not None:\n if self.tz is None:\n raise ValueError(\n \"The index must be timezone aware when indexing \"\n \"with a date string with a UTC offset\"\n )\n start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)\n end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)\n elif self.tz is not None:\n start = start.tz_localize(self.tz)\n end = end.tz_localize(self.tz)\n return start, end\n\n def _validate_partial_date_slice(self, reso: Resolution):\n assert isinstance(reso, Resolution), (type(reso), reso)\n if (\n self.is_monotonic\n and reso.attrname in [\"day\", \"hour\", \"minute\", \"second\"]\n and self._resolution_obj >= reso\n ):\n # These resolution/monotonicity validations came from GH3931,\n # GH3452 and GH2369.\n\n # See also GH14826\n raise KeyError\n\n if reso == \"microsecond\":\n # _partial_date_slice doesn't allow microsecond resolution, but\n # _parsed_string_to_bounds allows it.\n raise KeyError\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n orig_key = key\n if is_valid_nat_for_dtype(key, self.dtype):\n key = NaT\n\n if isinstance(key, self._data._recognized_scalars):\n # needed to localize naive datetimes\n key = self._maybe_cast_for_get_loc(key)\n\n elif isinstance(key, str):\n try:\n return self._get_string_slice(key)\n except (TypeError, KeyError, ValueError, OverflowError):\n pass\n\n try:\n key = self._maybe_cast_for_get_loc(key)\n except ValueError as err:\n raise KeyError(key) from err\n\n elif isinstance(key, timedelta):\n # GH#20464\n raise 
TypeError(\n f\"Cannot index {type(self).__name__} with {type(key).__name__}\"\n )\n\n elif isinstance(key, time):\n if method is not None:\n raise NotImplementedError(\n \"cannot yet lookup inexact labels when key is a time object\"\n )\n return self.indexer_at_time(key)\n\n else:\n # unrecognized type\n raise KeyError(key)\n\n try:\n return Index.get_loc(self, key, method, tolerance)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n def _maybe_cast_for_get_loc(self, key) -> Timestamp:\n # needed to localize naive datetimes\n key = Timestamp(key)\n if key.tzinfo is None:\n key = key.tz_localize(self.tz)\n else:\n key = key.tz_convert(self.tz)\n return key\n\n def _maybe_cast_slice_bound(self, label, side: str, kind):\n \"\"\"\n If label is a string, cast it to datetime according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None]\n\n if is_float(label) or isinstance(label, time) or is_integer(label):\n self._invalid_indexer(\"slice\", label)\n\n if isinstance(label, str):\n freq = getattr(self, \"freqstr\", getattr(self, \"inferred_freq\", None))\n parsed, reso = parsing.parse_time_string(label, freq)\n reso = Resolution.from_attrname(reso)\n lower, upper = self._parsed_string_to_bounds(reso, parsed)\n # lower, upper form the half-open interval:\n # [parsed, parsed + 1 freq)\n # because label may be passed to searchsorted\n # the bounds need swapped if index is reverse sorted and has a\n # length > 1 (is_monotonic_decreasing gives True for empty\n # and length 1 index)\n if self._is_strictly_monotonic_decreasing and len(self) > 1:\n return upper if side == \"left\" else lower\n return lower if side == \"left\" else upper\n else:\n return label\n\n def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):\n freq = getattr(self, \"freqstr\", getattr(self, \"inferred_freq\", None))\n parsed, reso = parsing.parse_time_string(key, freq)\n reso = Resolution.from_attrname(reso)\n loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)\n return loc\n\n def slice_indexer(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` are both either string or None perform\n value-based selection in non-monotonic cases.\n\n \"\"\"\n # For historical reasons DatetimeIndex supports slices between two\n # instances of datetime.time as if it were applying a slice mask to\n # an array of (self.hour, self.minute, self.seconds, self.microsecond).\n if isinstance(start, time) and isinstance(end, time):\n if step is not None and step != 1:\n raise ValueError(\"Must have step size of 1 with time slices\")\n return self.indexer_between_time(start, end)\n\n if isinstance(start, time) or isinstance(end, time):\n raise KeyError(\"Cannot mix time and non-time slice keys\")\n\n # Pandas supports slicing with dates, treated as datetimes at midnight.\n # https://github.com/pandas-dev/pandas/issues/31501\n if isinstance(start, date) and not isinstance(start, datetime):\n start = datetime.combine(start, time(0, 
0))\n if isinstance(end, date) and not isinstance(end, datetime):\n end = datetime.combine(end, time(0, 0))\n\n try:\n return Index.slice_indexer(self, start, end, step, kind=kind)\n except KeyError:\n # For historical reasons DatetimeIndex by default supports\n # value-based partial (aka string) slices on non-monotonic arrays,\n # let's try that.\n if (start is None or isinstance(start, str)) and (\n end is None or isinstance(end, str)\n ):\n mask = True\n if start is not None:\n start_casted = self._maybe_cast_slice_bound(start, \"left\", kind)\n mask = start_casted <= self\n\n if end is not None:\n end_casted = self._maybe_cast_slice_bound(end, \"right\", kind)\n mask = (self <= end_casted) & mask\n\n indexer = mask.nonzero()[0][::step]\n if len(indexer) == len(self):\n return slice(None)\n else:\n return indexer\n else:\n raise\n\n # --------------------------------------------------------------------\n\n def is_type_compatible(self, typ) -> bool:\n return typ == self.inferred_type or typ == \"datetime\"\n\n @property\n def inferred_type(self) -> str:\n # b/c datetime is represented as microseconds since the epoch, make\n # sure we can't have ambiguous indexing\n return \"datetime64\"\n\n def indexer_at_time(self, time, asof=False):\n \"\"\"\n Return index locations of values at particular time of day\n (e.g. 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n Time passed in either as object (datetime.time) or as string in\n appropriate format (\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\",\n \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\", \"%I%M%S%p\").\n\n Returns\n -------\n values_at_time : array of integers\n\n See Also\n --------\n indexer_between_time : Get index locations of values between particular\n times of day.\n DataFrame.at_time : Select values at particular time of day.\n \"\"\"\n if asof:\n raise NotImplementedError(\"'asof' argument is not supported\")\n\n if isinstance(time, str):\n from dateutil.parser import parse\n\n time = parse(time).time()\n\n if time.tzinfo:\n if self.tz is None:\n raise ValueError(\"Index must be timezone aware.\")\n time_micros = self.tz_convert(time.tzinfo)._get_time_micros()\n else:\n time_micros = self._get_time_micros()\n micros = _time_to_micros(time)\n return (micros == time_micros).nonzero()[0]\n\n def indexer_between_time(\n self, start_time, end_time, include_start=True, include_end=True\n ):\n \"\"\"\n Return index locations of values between particular times of day\n (e.g., 9:00-9:30AM).\n\n Parameters\n ----------\n start_time, end_time : datetime.time, str\n Time passed either as object (datetime.time) or as string in\n appropriate format (\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\",\n \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\",\"%I%M%S%p\").\n include_start : bool, default True\n include_end : bool, default True\n\n Returns\n -------\n values_between_time : array of integers\n\n See Also\n --------\n indexer_at_time : Get index locations of values at particular time of day.\n DataFrame.between_time : Select values between particular times of day.\n \"\"\"\n start_time = to_time(start_time)\n end_time = to_time(end_time)\n time_micros = self._get_time_micros()\n start_micros = _time_to_micros(start_time)\n end_micros = _time_to_micros(end_time)\n\n if include_start and include_end:\n lop = rop = operator.le\n elif include_start:\n lop = operator.le\n rop = operator.lt\n elif include_end:\n lop = operator.lt\n rop = operator.le\n else:\n lop = rop = operator.lt\n\n if start_time <= end_time:\n join_op = operator.and_\n 
else:\n join_op = operator.or_\n\n mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))\n\n return mask.nonzero()[0]\n\n\nDatetimeIndex._add_logical_methods_disabled()\n\n\ndef date_range(\n start=None,\n end=None,\n periods=None,\n freq=None,\n tz=None,\n normalize=False,\n name=None,\n closed=None,\n **kwargs,\n) -> DatetimeIndex:\n \"\"\"\n Return a fixed frequency DatetimeIndex.\n\n Parameters\n ----------\n start : str or datetime-like, optional\n Left bound for generating dates.\n end : str or datetime-like, optional\n Right bound for generating dates.\n periods : int, optional\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'. See\n :ref:`here <timeseries.offset_aliases>` for a list of\n frequency aliases.\n tz : str or tzinfo, optional\n Time zone name for returning localized DatetimeIndex, for example\n 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is\n timezone-naive.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting DatetimeIndex.\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None, the default).\n **kwargs\n For compatibility. Has no effect on the result.\n\n Returns\n -------\n rng : DatetimeIndex\n\n See Also\n --------\n DatetimeIndex : An immutable container for datetimes.\n timedelta_range : Return a fixed frequency TimedeltaIndex.\n period_range : Return a fixed frequency PeriodIndex.\n interval_range : Return a fixed frequency IntervalIndex.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. 
If ``freq`` is omitted, the resulting\n ``DatetimeIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n **Specifying the values**\n\n The next four examples generate the same `DatetimeIndex`, but vary\n the combination of `start`, `end` and `periods`.\n\n Specify `start` and `end`, with the default daily frequency.\n\n >>> pd.date_range(start='1/1/2018', end='1/08/2018')\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `start` and `periods`, the number of periods (days).\n\n >>> pd.date_range(start='1/1/2018', periods=8)\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `end` and `periods`, the number of periods (days).\n\n >>> pd.date_range(end='1/1/2018', periods=8)\n DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',\n '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `start`, `end`, and `periods`; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)\n DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',\n '2018-04-27 00:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Other Parameters**\n\n Changed the `freq` (frequency) to ``'M'`` (month end frequency).\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq='M')\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',\n '2018-05-31'],\n dtype='datetime64[ns]', freq='M')\n\n Multiples are allowed\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq='3M')\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n dtype='datetime64[ns]', freq='3M')\n\n `freq` can also be specified as an Offset object.\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n dtype='datetime64[ns]', freq='3M')\n\n Specify `tz` to set the timezone.\n\n >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')\n DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',\n '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',\n '2018-01-05 00:00:00+09:00'],\n dtype='datetime64[ns, Asia/Tokyo]', freq='D')\n\n `closed` controls whether to include `start` and `end` that are on the\n boundary. 
The default includes boundary points on either end.\n\n    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)\n    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],\n                  dtype='datetime64[ns]', freq='D')\n\n    Use ``closed='left'`` to exclude `end` if it falls on the boundary.\n\n    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')\n    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],\n                  dtype='datetime64[ns]', freq='D')\n\n    Use ``closed='right'`` to exclude `start` if it falls on the boundary.\n\n    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')\n    DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],\n                  dtype='datetime64[ns]', freq='D')\n    \"\"\"\n    if freq is None and com.any_none(periods, start, end):\n        freq = \"D\"\n\n    dtarr = DatetimeArray._generate_range(\n        start=start,\n        end=end,\n        periods=periods,\n        freq=freq,\n        tz=tz,\n        normalize=normalize,\n        closed=closed,\n        **kwargs,\n    )\n    return DatetimeIndex._simple_new(dtarr, name=name)\n\n\ndef bdate_range(\n    start=None,\n    end=None,\n    periods=None,\n    freq=\"B\",\n    tz=None,\n    normalize=True,\n    name=None,\n    weekmask=None,\n    holidays=None,\n    closed=None,\n    **kwargs,\n) -> DatetimeIndex:\n    \"\"\"\n    Return a fixed frequency DatetimeIndex, with business day as the default\n    frequency.\n\n    Parameters\n    ----------\n    start : str or datetime-like, default None\n        Left bound for generating dates.\n    end : str or datetime-like, default None\n        Right bound for generating dates.\n    periods : int, default None\n        Number of periods to generate.\n    freq : str or DateOffset, default 'B' (business daily)\n        Frequency strings can have multiples, e.g. '5H'.\n    tz : str or None\n        Time zone name for returning localized DatetimeIndex, for example\n        Asia/Beijing.\n    normalize : bool, default True\n        Normalize start/end dates to midnight before generating date range.\n    name : str, default None\n        Name of the resulting DatetimeIndex.\n    weekmask : str or None, default None\n        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,\n        only used when custom frequency strings are passed. The default\n        value None is equivalent to 'Mon Tue Wed Thu Fri'.\n    holidays : list-like or None, default None\n        Dates to exclude from the set of valid business days, passed to\n        ``numpy.busdaycalendar``, only used when custom frequency strings\n        are passed.\n    closed : str, default None\n        Make the interval closed with respect to the given frequency to\n        the 'left', 'right', or both sides (None).\n    **kwargs\n        For compatibility. Has no effect on the result.\n\n    Returns\n    -------\n    DatetimeIndex\n\n    Notes\n    -----\n    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,\n    exactly three must be specified. Specifying ``freq`` is a requirement\n    for ``bdate_range``. 
Use ``date_range`` if specifying ``freq`` is not\n desired.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Note how the two weekend days are skipped in the result.\n\n >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-08'],\n dtype='datetime64[ns]', freq='B')\n \"\"\"\n if freq is None:\n msg = \"freq must be specified for bdate_range; use date_range instead\"\n raise TypeError(msg)\n\n if isinstance(freq, str) and freq.startswith(\"C\"):\n try:\n weekmask = weekmask or \"Mon Tue Wed Thu Fri\"\n freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)\n except (KeyError, TypeError) as err:\n msg = f\"invalid custom frequency string: {freq}\"\n raise ValueError(msg) from err\n elif holidays or weekmask:\n msg = (\n \"a custom frequency string is required when holidays or \"\n f\"weekmask are passed, got frequency {freq}\"\n )\n raise ValueError(msg)\n\n return date_range(\n start=start,\n end=end,\n periods=periods,\n freq=freq,\n tz=tz,\n normalize=normalize,\n name=name,\n closed=closed,\n **kwargs,\n )\n\n\ndef _time_to_micros(time_obj: time) -> int:\n seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second\n return 1_000_000 * seconds + time_obj.microsecond\n"
] | [
[
"pandas.Series",
"pandas._libs.tslibs.parsing.parse_time_string",
"pandas.errors.InvalidIndexError",
"pandas._libs.tslibs.ints_to_pydatetime",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.is_integer",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.is_float",
"pandas._libs.tslibs.Resolution.from_attrname",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.common.any_none",
"pandas.core.indexes.base.Index.union",
"pandas.core.arrays.datetimes.DatetimeArray._generate_range",
"pandas.core.indexes.base.Index.slice_indexer",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas.core.tools.times.to_time",
"pandas._libs.Period",
"pandas._libs.tslibs.to_offset",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.io.formats.format._get_format_datetime64",
"pandas._libs.tslibs.timezones.is_utc",
"pandas.io.formats.format._is_dates_only",
"pandas.core.arrays.datetimes.DatetimeArray",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.indexes.extension.inherit_names",
"pandas.core.indexes.base.Index.get_loc",
"pandas.core.arrays.datetimes.DatetimeArray._from_sequence",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.arrays.datetimes.tz_to_dtype"
]
] |
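The row above stores pandas' `DatetimeIndex` implementation (indexes/datetimes.py). Below is a minimal sketch exercising the public entry points that file defines; it assumes only a pandas installation of roughly the same era (the 1.x `closed=` and `freq="H"` spellings), and the dates are arbitrary toy values:

```python
import pandas as pd

# date_range: of start/end/periods/freq, exactly three may be given.
idx = pd.date_range(start="2018-01-01", periods=6, freq="H")

# indexer_between_time() mirrors the _time_to_micros() comparison above:
# each timestamp is reduced to microseconds-since-midnight, then compared
# to the bounds with <= or < depending on include_start/include_end.
locs = idx.indexer_between_time("01:00", "03:00", include_end=False)
print(idx[locs])           # 01:00 and 02:00 only

# snap() rolls every timestamp to the nearest occurrence of the frequency.
print(idx.snap(freq="D"))
```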
csiu/kick | [
"0ebc9166074b702fc8b5835685ad102957ab349c"
] | [
"src/python/sim_doc.py"
] | [
"import sys\nsys.path.append(\"/Users/csiu/repo/kick/src/python\")\n\nimport argparse\nimport custom\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.metrics import pairwise_distances\n\nusage = \"\"\"\nFor finding similar documents\n\"\"\"\n\ndef get_args():\n parser = argparse.ArgumentParser(description=usage)\n\n parser.add_argument('-s', '--num_singular_values', default=100, type=int,\n help=\"Number of singular values to use from SVD\")\n\n parser.add_argument('-n', '--num_results', default=None, type=int,\n help=\"Number of similar documents to print in the results\")\n\n parser.add_argument('-w', '--term_weight', default=\"tfidf\",\n choices=[\"tfidf\", \"raw\"],\n help=\"How should terms in document be weighted? 'tfidf' or 'raw' counts\")\n\n parser.add_argument('-d', '--distance', default=\"cosine\",\n help=\"Metric for calculating the distance between documents.\")\n\n parser.add_argument('-i', '--document0_id', default=None, type=int,\n help=\"Kickstarter ID of query document\")\n\n parser.add_argument('-c', '--cache_dir', default=\".\",\n help=\"Specify cache dir\")\n\n parser.add_argument('-v', '--verbose', action='store_true')\n\n args = parser.parse_args()\n\n return(args)\n\ndef get_data():\n \"\"\"\n Output dataframe w/ 2 columns: \"id\", \"document\"\n \"\"\"\n # Get data\n dk = custom.DatabaseKick()\n cur = dk.connect()\n\n cur.execute(\"SELECT id, concat_ws(name, blurb) FROM info\")\n rows = cur.fetchall()\n df = pd.DataFrame(rows, columns=[\"id\", \"document\"])\n\n dk.disconnect()\n\n return(df)\n\ndef preprocess_data(df):\n \"\"\"\n Preprocess 'document' of dataframe by\n - to lowercase\n - remove nonletters\n - tokenize\n - remove stopwords\n - stem\n Dataframe will contain additional 'doc_processed' column\n and df['doc_processed'] will be returned\n \"\"\"\n\n def join_output(func):\n \"\"\"\n Decorator function to join list output to string\n \"\"\"\n def func_wrapper(text, *arg, **karg):\n return ' '.join(func(text, *arg, **karg))\n return func_wrapper\n\n def doc_to_string(doc):\n \"\"\"\n Replace None -> empty string, and\n text newlines (\\n, \\r) -> whitespace\n \"\"\"\n if doc == None:\n return(\"\")\n else:\n return(re.sub(\"[\\n\\r]\", \"\", doc))\n\n df['document'] = df['document'].apply(\n lambda x: doc_to_string(x))\n\n text_processing = join_output(custom.text_processing)\n df['doc_processed'] = df['document'].apply(\n lambda x: text_processing(x, method=\"stem\"))\n\n return(df['doc_processed'])\n\ndef compute_distance(U, i=None, sort=False, top_n=None, metric='euclidean'):\n \"\"\"\n Compute distance of document U[i] with all documents in U\n \"\"\"\n if i != None:\n index_document0 = df[df[\"id\"] == i].index.tolist()\n else:\n index_document0 = 0\n\n document0 = np.asmatrix(U[index_document0])\n\n dist = pairwise_distances(document0, U, metric=metric)\n df_dist = pd.DataFrame(np.transpose(dist), columns=[\"dist\"])\n\n if sort:\n df_dist.sort_values(by=\"dist\", inplace=True)\n\n if top_n != None:\n assert type(top_n) is int\n df_dist = df_dist.head(top_n)\n\n return(df_dist)\n\n\nif __name__ == '__main__':\n args = get_args()\n num_singular_values = args.num_singular_values\n document0_id = args.document0_id\n num_results = args.num_results\n cache_dir = args.cache_dir\n verbose = args.verbose\n term_weight = args.term_weight\n distance_metric = 
args.distance\n\n preprocess_file = os.path.join(os.path.abspath(cache_dir),\n \"preprocessed.pkl\")\n\n\n msg = \"# Getting and preprocessing data...\"\n if os.path.isfile(preprocess_file):\n if verbose: print(msg, \"from cache...\")\n df = pd.read_pickle(preprocess_file)\n else:\n if verbose: print(msg)\n df = get_data()\n _ = preprocess_data(df)\n\n df.to_pickle(preprocess_file)\n\n if term_weight == \"raw\":\n if verbose: print(\"# Making count matrix...\")\n cv = CountVectorizer()\n X = cv.fit_transform(df['doc_processed'])\n else:\n if verbose: print(\"# Making TF-IDF matrix...\")\n vectorizer = TfidfVectorizer()\n X = vectorizer.fit_transform(df['doc_processed'])\n\n if verbose: print(\"# Computing SVD for %s singular values...\" %\n num_singular_values)\n U, s, Vh = randomized_svd(X, n_components=num_singular_values,\n n_iter=5, random_state=5)\n\n if verbose: print(\"# Computing distances (%s)...\" % distance_metric)\n top_n = compute_distance(U, i=document0_id,\n sort=True, top_n=num_results,\n metric=distance_metric)\n\n if verbose: print(\"# Printing results...\")\n results = []\n counter = 0\n for index, row in df.iloc[top_n.index].iterrows():\n row[\"dist\"] = top_n.iloc[counter][\"dist\"]\n results.append(row)\n counter += 1\n\n print('>> %s | %s' % (row['id'], row['doc_processed']),\n row['document'], \"\\n\", sep=\"\\n\")\n"
] | [
[
"pandas.read_pickle",
"numpy.transpose",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.DataFrame",
"sklearn.metrics.pairwise_distances",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.asmatrix",
"sklearn.utils.extmath.randomized_svd"
]
] |
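The sim_doc.py row above implements LSA-style document similarity: TF-IDF weighting, truncated SVD, then pairwise distances in the reduced space. A minimal sketch of that pipeline on a hypothetical three-document corpus (assumes scikit-learn and numpy; the toy strings are invented):

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import pairwise_distances
from sklearn.utils.extmath import randomized_svd

docs = ["kickstarter board game", "card game campaign", "solar charger gadget"]

X = TfidfVectorizer().fit_transform(docs)  # docs x terms, sparse
U, s, Vt = randomized_svd(X, n_components=2, n_iter=5, random_state=5)

# Distance from document 0 to every document in the latent space,
# mirroring compute_distance() in the script above.
dist = pairwise_distances(U[0].reshape(1, -1), U, metric="cosine").ravel()
print(np.argsort(dist))  # nearest documents first; index 0 is its own nearest
```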
virgile-hernicot/SPIN | [
"21871e3d333ef37866402ae21498b331aa771b2d"
] | [
"datasets/preprocess/mpi_inf_3dhp.py"
] | [
"import os\nimport sys\nimport cv2\nimport glob\nimport h5py\nimport json\nimport numpy as np\nimport scipy.io as sio\nimport scipy.misc\nfrom .read_openpose import read_openpose\n\ndef read_calibration(calib_file, vid_list):\n Ks, Rs, Ts = [], [], []\n file = open(calib_file, 'r')\n content = file.readlines()\n for vid_i in vid_list:\n K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])\n K = np.reshape(K, (4, 4))\n RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])\n RT = np.reshape(RT, (4, 4))\n R = RT[:3,:3]\n T = RT[:3,3]/1000\n Ks.append(K)\n Rs.append(R)\n Ts.append(T)\n return Ks, Rs, Ts\n \ndef train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):\n\n joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]\n\n h, w = 2048, 2048\n imgnames_, scales_, centers_ = [], [], []\n parts_, Ss_, openposes_ = [], [], []\n\n # training data\n user_list = range(1,9)\n seq_list = range(1,3)\n vid_list = list(range(3)) + list(range(4,9))\n\n counter = 0\n\n for user_i in user_list:\n for seq_i in seq_list:\n seq_path = os.path.join(dataset_path,\n 'S' + str(user_i),\n 'Seq' + str(seq_i))\n # mat file with annotations\n annot_file = os.path.join(seq_path, 'annot.mat')\n annot2 = sio.loadmat(annot_file)['annot2']\n annot3 = sio.loadmat(annot_file)['annot3']\n # calibration file and camera parameters\n calib_file = os.path.join(seq_path, 'camera.calibration')\n Ks, Rs, Ts = read_calibration(calib_file, vid_list)\n\n for j, vid_i in enumerate(vid_list):\n\n # image folder\n imgs_path = os.path.join(seq_path, \n 'imageFrames',\n 'video_' + str(vid_i))\n\n # extract frames from video file\n if extract_img:\n\n # if doesn't exist\n if not os.path.isdir(imgs_path):\n os.makedirs(imgs_path)\n\n # video file\n vid_file = os.path.join(seq_path,\n 'imageSequence',\n 'video_' + str(vid_i) + '.avi')\n vidcap = cv2.VideoCapture(vid_file)\n\n # process video\n frame = 0\n while 1:\n # extract all frames\n success, image = vidcap.read()\n if not success:\n break\n frame += 1\n # image name\n imgname = os.path.join(imgs_path,\n 'frame_%06d.jpg' % frame)\n # save image\n cv2.imwrite(imgname, image)\n\n # per frame\n cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]\n pattern = os.path.join(imgs_path, '*.jpg')\n img_list = glob.glob(pattern)\n for i, img_i in enumerate(img_list):\n\n # for each image we store the relevant annotations\n img_name = img_i.split('/')[-1]\n img_view = os.path.join('S' + str(user_i),\n 'Seq' + str(seq_i),\n 'imageFrames',\n 'video_' + str(vid_i),\n img_name)\n joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]\n S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000\n S17 = S17[joints17_idx] - S17[4] # 4 is the root\n bbox = [min(joints[:,0]), min(joints[:,1]),\n max(joints[:,0]), max(joints[:,1])]\n center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n\n # check that all joints are visible\n x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)\n y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)\n ok_pts = np.logical_and(x_in, y_in)\n if np.sum(ok_pts) < len(joints_idx):\n continue\n \n part = np.zeros([24,3])\n part[joints_idx] = np.hstack([joints, np.ones([17,1])])\n json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',\n img_view.replace('.jpg', '_keypoints.json'))\n openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')\n\n S = np.zeros([24,4])\n S[joints_idx] = np.hstack([S17, 
np.ones([17,1])])\n\n # because of the dataset size, we only keep every 10th frame\n counter += 1\n if counter % 10 != 1:\n continue\n\n # store the data\n imgnames_.append(img_view)\n centers_.append(center)\n scales_.append(scale)\n parts_.append(part)\n Ss_.append(S)\n openposes_.append(openpose)\n \n # store the data struct\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')\n if fits_3d is not None:\n fits_3d = np.load(fits_3d)\n np.savez(out_file, imgname=imgnames_,\n center=centers_,\n scale=scales_,\n part=parts_,\n pose=fits_3d['pose'],\n shape=fits_3d['shape'],\n has_smpl=fits_3d['has_smpl'],\n S=Ss_,\n openpose=openposes_)\n else:\n np.savez(out_file, imgname=imgnames_,\n center=centers_,\n scale=scales_,\n part=parts_,\n S=Ss_,\n openpose=openposes_) \n \n \ndef test_data(dataset_path, out_path, joints_idx, scaleFactor):\n\n joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]\n\n imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []\n\n # training data\n user_list = range(1,7)\n\n for user_i in user_list:\n seq_path = os.path.join(dataset_path,\n 'mpi_inf_3dhp_test_set',\n 'TS' + str(user_i))\n # mat file with annotations\n annot_file = os.path.join(seq_path, 'annot_data.mat')\n mat_as_h5 = h5py.File(annot_file, 'r')\n annot2 = np.array(mat_as_h5['annot2'])\n annot3 = np.array(mat_as_h5['univ_annot3'])\n valid = np.array(mat_as_h5['valid_frame'])\n for frame_i, valid_i in enumerate(valid):\n if valid_i == 0:\n continue\n img_name = os.path.join('mpi_inf_3dhp_test_set',\n 'TS' + str(user_i),\n 'imageSequence',\n 'img_' + str(frame_i+1).zfill(6) + '.jpg')\n\n joints = annot2[frame_i,0,joints17_idx,:]\n S17 = annot3[frame_i,0,joints17_idx,:]/1000\n S17 = S17 - S17[0]\n\n bbox = [min(joints[:,0]), min(joints[:,1]),\n max(joints[:,0]), max(joints[:,1])]\n center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n\n # check that all joints are visible\n img_file = os.path.join(dataset_path, img_name)\n I = scipy.misc.imread(img_file)\n h, w, _ = I.shape\n x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)\n y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)\n ok_pts = np.logical_and(x_in, y_in)\n if np.sum(ok_pts) < len(joints_idx):\n continue\n\n part = np.zeros([24,3])\n part[joints_idx] = np.hstack([joints, np.ones([17,1])])\n\n S = np.zeros([24,4])\n S[joints_idx] = np.hstack([S17, np.ones([17,1])])\n\n # store the data\n imgnames_.append(img_name)\n centers_.append(center)\n scales_.append(scale)\n parts_.append(part)\n Ss_.append(S)\n\n # store the data struct\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')\n np.savez(out_file, imgname=imgnames_,\n center=centers_,\n scale=scales_,\n part=parts_,\n S=Ss_) \n\ndef mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):\n\n scaleFactor = 1.2\n joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]\n \n if static_fits is not None:\n fits_3d = os.path.join(static_fits, \n 'mpi-inf-3dhp_mview_fits.npz')\n else:\n fits_3d = None\n \n if mode == 'train':\n train_data(dataset_path, openpose_path, out_path, \n joints_idx, scaleFactor, extract_img=extract_img, fits_3d=fits_3d)\n elif mode == 'test':\n test_data(dataset_path, out_path, joints_idx, scaleFactor)\n"
] | [
[
"scipy.io.loadmat",
"numpy.load",
"numpy.sum",
"numpy.ones",
"numpy.zeros",
"numpy.savez",
"numpy.reshape",
"numpy.logical_and",
"numpy.array"
]
] |
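The MPI-INF-3DHP preprocessing row above reduces each frame's 2D joints to a bounding-box center and scale, and discards frames with out-of-image joints. A numpy-only sketch of that per-frame computation (the joint coordinates are made-up; the 2048x2048 image size, the 1.2 padding factor, and the /200 normalization come from the script):

```python
import numpy as np

joints = np.array([[512.0, 300.0], [640.0, 420.0], [580.0, 900.0]])  # (x, y) pairs
scaleFactor = 1.2
h, w = 2048, 2048

# Tight bounding box around the 2D joints.
bbox = [joints[:, 0].min(), joints[:, 1].min(),
        joints[:, 0].max(), joints[:, 1].max()]
center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
# Scale: larger bbox side, padded by scaleFactor, normalized by 200 px.
scale = scaleFactor * max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 200

# Keep the frame only if every joint lands inside the image.
ok = ((joints[:, 0] >= 0) & (joints[:, 0] < w) &
      (joints[:, 1] >= 0) & (joints[:, 1] < h)).all()
print(center, scale, ok)
```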
4k4xs4pH1r3/tf_rl_tutorial | [
"c58d10c60cfd79b2e0661b4a49cccae8d4584c57"
] | [
"tf_rl_tutorial/models.py"
] | [
"# Copyright 2016 Mandiant, A FireEye Company\n# Authors: Brian Jones\n# License: Apache 2.0\n\n''' Model classes for \"Relational Learning with TensorFlow\" tutorial '''\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .util import ContrastiveTrainingProvider\n\n\ndef least_squares_objective(output, target, add_bias=True):\n ''' Creates final model output and loss for least squares objective\n\n Args:\n output: Model output\n target: Training target placeholder\n add_bias: If True, a bias Variable will be added to the output\n\n Returns:\n tuple (final output, loss)\n '''\n y = output\n if add_bias:\n bias = tf.Variable([0.0])\n y = output + bias\n loss = tf.reduce_sum(tf.square(y - target))\n return y, loss\n\n\ndef logistic_objective(output, target, add_bias=True):\n ''' Creates final model output and loss for logistic objective\n\n Args:\n output: Model output\n target: Training target placeholder\n add_bias: If True, a bias Variable will be added to the output\n\n Returns:\n tuple (final output, loss)\n '''\n y = output\n if add_bias:\n bias = tf.Variable([0.0])\n y = output + bias\n sig_y = tf.clip_by_value(tf.sigmoid(y), 0.001, 0.999) # avoid NaNs\n loss = -tf.reduce_sum(target*tf.log(sig_y) + (1-target)*tf.log(1-sig_y))\n return sig_y, loss\n\n\ndef ranking_margin_objective(output, margin=1.0):\n ''' Create final model output and loss for pairwise ranking margin objective\n \n Loss for single pair (f(p), f(n)) = [margin - f(p) + f(n)]+\n This only works when given model output on alternating positive/negative \n pairs: [pos,neg,pos,neg,...]. TODO: check target placeholder\n at runtime to make sure this is the case?\n \n Args:\n output: Model output\n margin: The margin value for the pairwise hinge loss\n\n Returns:\n tuple (final output, loss)\n '''\n y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2]\n pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs\n hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)\n total_hinge_loss = tf.reduce_sum(hinge_losses)\n return output, total_hinge_loss\n\n\ndef sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0):\n '''Sparse update operation that ensures selected rows in var_matrix \n do not have a Euclidean norm greater than maxnorm. Rows that exceed \n it are scaled to length.\n \n Args:\n var_matrix: 2D mutable tensor (Variable) to operate on\n indices: 1D tensor with the row indices to constrain\n maxnorm: the maximum Euclidean norm\n\n Returns:\n An operation that will update var_matrix when run in a Session\n '''\n selected_rows = tf.nn.embedding_lookup(var_matrix, indices)\n row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))\n scaling = maxnorm / tf.maximum(row_norms, maxnorm)\n scaled = selected_rows * tf.expand_dims(scaling, 1)\n return tf.scatter_update(var_matrix, indices, scaled)\n\n\ndef dense_maxnorm_update(var_matrix, maxnorm=1.0):\n '''Dense update operation that ensures all rows in var_matrix \n do not have a Euclidean norm greater than maxnorm. 
Rows that exceed \n it are scaled to length.\n \n Args:\n var_matrix: 2D mutable tensor (Variable) to operate on\n maxnorm: the maximum Euclidean norm\n \n Returns:\n An operation that will update var_matrix when run in a Session\n '''\n row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))\n scaling = maxnorm / tf.maximum(row_norms, maxnorm)\n scaled = var_matrix * tf.expand_dims(scaling, 1)\n return tf.assign(var_matrix, scaled)\n\n\ndef dense_maxnorm(var_matrix, maxnorm=1.0):\n '''Similar to dense_maxnorm_update(), except this returns a new Tensor\n instead of an operation that modifies var_matrix.\n\n Args:\n var_matrix: 2D tensor (Variable)\n maxnorm: the maximum Euclidean norm\n\n Returns:\n A new tensor where all rows have been scaled as necessary\n '''\n axis_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))\n scaling = maxnorm / tf.maximum(axis_norms, maxnorm)\n return var_matrix * tf.expand_dims(scaling, 1)\n\n\nclass BaseModel(object):\n ''' Base class for embedding-based relational learning models that use \n maxnorm regularization. Subclasses must implement _create_model() and\n populate self.train_step, and can optionally populate self.post_step for \n post-processing.\n \n Note: When model_type is 'ranking_margin', the mini-batch provider returned\n by _create_batch_provider() must provide instances in alternating \n pos/neg pairs: [pos, neg, pos, neg, ...]. This is satisfied when using \n ContrastiveTrainingProvider; be careful if you use a different one.\n \n Args:\n embedding_size: Embedding vector length\n maxnorm: Maximum Euclidean norm for embedding vectors\n batch_pos_cnt: Number of positive examples to use in each mini-batch\n max_iter: Maximum number of optimization iterations to perform\n model_type: Possible values:\n 'least_squares': squared loss on 0/1 targets\n 'logistic': sigmoid link function, crossent loss on 0/1 targets\n 'ranking_margin': ranking margin on pos/neg pairs\n add_bias: If True, a bias Variable will be added to the output for\n least_squares and logistic models. \n opt: An optimizer object to use. 
If None, the default optimizer is\n tf.train.AdagradOptimizer(1.0) \n \n TODO: add support for other regularizers like L2 \n '''\n\n def __init__(self, embedding_size, maxnorm=1.0,\n batch_pos_cnt=100, max_iter=1000, \n model_type='least_squares', add_bias=True, \n opt=None):\n self.embedding_size = embedding_size\n self.maxnorm = maxnorm\n self.batch_pos_cnt = batch_pos_cnt\n self.max_iter = max_iter\n self.model_type = model_type\n self.add_bias = add_bias\n if opt is None:\n opt = tf.train.AdagradOptimizer(1.0)\n self.opt = opt\n self.sess = None\n self.train_step = None\n self.post_step = None\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.head_input = tf.placeholder(tf.int32, shape=[None])\n self.rel_input = tf.placeholder(tf.int32, shape=[None])\n self.tail_input = tf.placeholder(tf.int32, shape=[None])\n self.target = tf.placeholder(tf.float32, shape=[None])\n\n def _create_model(self, train_triples):\n ''' Subclasses must build Graph and set self.train_step '''\n raise Exception('subclass must implement')\n \n def _create_batch_provider(self, train_triples):\n ''' Default implementation '''\n return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt)\n \n def _create_output_and_loss(self, raw_output):\n if self.model_type == 'least_squares':\n return least_squares_objective(raw_output, self.target, self.add_bias)\n elif self.model_type == 'logistic':\n return logistic_objective(raw_output, self.target, self.add_bias)\n elif self.model_type == 'ranking_margin':\n return ranking_margin_objective(raw_output, 1.0)\n else:\n raise Exception('Unknown model_type')\n \n def _norm_constraint_op(self, var_matrix, row_indices, maxnorm): \n '''\n Args:\n var_matrix: A 2D Tensor holding the vectors to constrain (in rows)\n row_indices: The rows in var_tensor that are being considered for\n constraint application (typically embedding vectors for \n entities observed for a minibatch of training data). These \n will be used for a sparse variable update operation if the\n chosen optimizer only modified these entries. Otherwise \n a dense operation is used and row_indices are ignored.\n maxnorm: The maximum Euclidean norm for the rows in var_tensor\n \n Returns:\n An operation which will apply the constraints when run in a Session\n '''\n # Currently, TF optimizers do not update variables with zero gradient\n # except AdamOptimizer\n if isinstance(self.opt, tf.train.AdamOptimizer):\n return dense_maxnorm_update(var_matrix, maxnorm)\n else:\n return sparse_maxnorm_update(var_matrix, row_indices, maxnorm)\n \n def embeddings(self):\n ''' Subclass should override this if it uses different embedding\n variables\n \n Returns:\n A list of pairs: [(embedding name, embedding 2D Tensor)]\n '''\n return [('entity', self.entity_embedding_vars),\n ('rel', self.rel_embedding_vars)]\n \n def create_feed_dict(self, triples, labels=None, training=False):\n ''' Create a TensorFlow feed dict for relationship triples\n \n Args:\n triples: A numpy integer array of relationship triples, where each \n row contains [head idx, relationship idx, tail idx]\n labels: (optional) A label array for triples\n training: (optional) A flag indicating whether the feed dict is\n for training or test purposes. 
Useful for things like\n                dropout where a dropout_probability variable is set differently\n                in the two contexts.\n        '''\n        feed_dict = {self.head_input: triples[:, 0], \n                     self.rel_input: triples[:, 1], \n                     self.tail_input: triples[:, 2]}\n        if labels is not None:\n            feed_dict[self.target] = labels\n        return feed_dict\n    \n    def close(self):\n        ''' Closes the TensorFlow Session object '''\n        self.sess.close()\n    \n    def fit(self, train_triples, step_callback=None):\n        ''' Trains the model on relationship triples\n        \n        Args:\n            train_triples: A numpy integer array of relationship triples, where \n                each row contains [head idx, relationship idx, tail idx]\n            step_callback: (optional) A function that will be called before each \n                optimization step, step_callback(iteration, feed_dict)\n        '''\n        if self.sess is not None:\n            self.sess.close()\n        self.sess = tf.Session(graph=self.graph)\n        with self.graph.as_default():\n            self._create_model(train_triples)\n            self.sess.run(tf.initialize_all_variables())\n            batch_provider = self._create_batch_provider(train_triples)\n            for i in range(self.max_iter):\n                batch_triples, batch_labels = batch_provider.next_batch()\n                feed_dict = self.create_feed_dict(batch_triples, batch_labels, training=True)\n                if step_callback:\n                    keep_going = step_callback(i, feed_dict)\n                    if not keep_going:\n                        break\n                self.sess.run(self.train_step, feed_dict)\n                if self.post_step is not None:\n                    self.sess.run(self.post_step, feed_dict)\n\n    def predict(self, triples):\n        ''' Runs a trained model on the supplied relationship triples. fit()\n        must be called before calling this function.\n        \n        Args:\n            triples: A numpy integer array of relationship triples, where each \n                row contains [head idx, relationship idx, tail idx]\n        '''\n        feed_dict = self.create_feed_dict(triples, training=False)\n        return self.sess.run(self.output, feed_dict=feed_dict)\n    \n\nclass Contrastive_CP(BaseModel):\n    ''' Model with a scoring function based on CANDECOMP/PARAFAC tensor \n    decomposition. Optimization differs, however, in the use of maxnorm \n    regularization and contrastive negative sampling.\n    \n    Score for (head i, rel k, tail j) triple is: h_i^T * diag(r_k) * t_j, \n    where h_i and t_j are embedding vectors for the head and tail entities, \n    and r_k is an embedding vector for the relationship type.\n    \n    Args:\n        embedding_size: Embedding vector length\n        maxnorm: Maximum Euclidean norm for embedding vectors\n        batch_pos_cnt: Number of positive examples to use in each mini-batch\n        max_iter: Maximum number of optimization iterations to perform\n        model_type: Possible values:\n            'least_squares': squared loss on 0/1 targets\n            'logistic': sigmoid link function, crossent loss on 0/1 targets\n            'ranking_margin': ranking margin on pos/neg pairs\n        add_bias: If True, a bias Variable will be added to the output for\n            least_squares and logistic models.\n        opt: An optimizer object to use. If None, the default optimizer is\n            tf.train.AdagradOptimizer(1.0) \n\n    References:\n        Kolda, Tamara G., and Brett W. Bader. 
\"Tensor decompositions and \n applications.\" SIAM review 51.3 (2009): 455-500.\n '''\n \n def _create_model(self, train_triples):\n # Count unique items to determine embedding matrix sizes\n head_cnt = len(set(train_triples[:,0]))\n rel_cnt = len(set(train_triples[:,1]))\n tail_cnt = len(set(train_triples[:,2]))\n init_sd = 1.0 / np.sqrt(self.embedding_size)\n # Embedding matrices for entities and relationship types\n head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)\n rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)\n tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)\n if self.maxnorm is not None:\n # Ensure maxnorm constraints are initially satisfied\n head_init = dense_maxnorm(head_init, self.maxnorm)\n rel_init = dense_maxnorm(rel_init, self.maxnorm)\n tail_init = dense_maxnorm(tail_init, self.maxnorm)\n self.head_embedding_vars = tf.Variable(head_init)\n self.rel_embedding_vars = tf.Variable(rel_init)\n self.tail_embedding_vars = tf.Variable(tail_init)\n # Embedding layer for each (head, rel, tail) triple being fed in as input\n head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)\n rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)\n tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)\n # Model output\n raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1)\n self.output, self.loss = self._create_output_and_loss(raw_output)\n # Optimization\n self.train_step = self.opt.minimize(self.loss)\n if self.maxnorm is not None:\n # Post-processing to limit embedding vars to L2 ball\n head_constraint = self._norm_constraint_op(self.head_embedding_vars, \n tf.unique(self.head_input)[0], \n self.maxnorm)\n rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, \n tf.unique(self.rel_input)[0], \n self.maxnorm)\n tail_constraint = self._norm_constraint_op(self.tail_embedding_vars, \n tf.unique(self.tail_input)[0], \n self.maxnorm)\n self.post_step = [head_constraint, rel_constraint, tail_constraint]\n\n def _create_batch_provider(self, train):\n # CP treats head and tail entities separately\n return ContrastiveTrainingProvider(train, \n self.batch_pos_cnt, \n separate_head_tail=True)\n \n def embeddings(self):\n '''\n Returns:\n A list of pairs: [(embedding name, embedding 2D Tensor)]\n '''\n return [('head', self.head_embedding_vars),\n ('tail', self.head_embedding_vars),\n ('rel', self.rel_embedding_vars)]\n\n\nclass Bilinear(BaseModel):\n ''' Model with a scoring function based on the bilinear formulation of \n RESCAL. Optimization differs, however, in the use of maxnorm \n regularization and contrastive negative sampling.\n \n Score for (head i, rel k, tail j) triple is: e_i^T * R_k * e_j\n where e_i and e_j are D-dimensional embedding vectors for the head and tail \n entities, and R_k is a (D x D) matrix for the relationship type\n acting as a bilinear operator.\n \n Args:\n embedding_size: Embedding vector length\n maxnorm: Maximum Euclidean norm for embedding vectors\n rel_maxnorm_mult: Multiplier for the maxnorm threshold used for \n relationship embeddings. 
Example: If maxnorm=2.0 and \n rel_maxnorm_mult=4.0, then the maxnorm constrain for relationships\n will be 2.0 * 4.0 = 8.0.\n batch_pos_cnt: Number of positive examples to use in each mini-batch \n max_iter: Maximum number of optimization iterations to perform\n model_type: Possible values:\n 'least_squares': squared loss on 0/1 targets\n 'logistic': sigmoid link function, crossent loss on 0/1 targets\n 'ranking_margin': ranking margin on pos/neg pairs\n add_bias: If True, a bias Variable will be added to the output for\n least_squares and logistic models. \n opt: An optimizer object to use. If None, the default optimizer is\n tf.train.AdagradOptimizer(1.0) \n\n References:\n Nickel, Maximilian, Volker Tresp, and Hans-Peter Kriegel. \"A three-way \n model for collective learning on multi-relational data.\" Proceedings of \n the 28th international conference on machine learning (ICML-11). 2011. \n '''\n \n def __init__(self, embedding_size, maxnorm=1.0, rel_maxnorm_mult=3.0, \n batch_pos_cnt=100, max_iter=1000, \n model_type='least_squares', add_bias=True, opt=None):\n super(Bilinear, self).__init__(\n embedding_size=embedding_size,\n maxnorm=maxnorm,\n batch_pos_cnt=batch_pos_cnt,\n max_iter=max_iter,\n model_type=model_type,\n opt=opt)\n self.rel_maxnorm_mult = rel_maxnorm_mult\n \n def _create_model(self, train_triples):\n # Count unique items to determine embedding matrix sizes\n entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))\n rel_cnt = len(set(train_triples[:,1]))\n init_sd = 1.0 / np.sqrt(self.embedding_size)\n # Embedding variables for all entities and relationship types\n entity_embedding_shape = [entity_cnt, self.embedding_size]\n # Relationship embeddings will be stored in flattened format to make \n # applying maxnorm constraints easier\n rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]\n entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)\n rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)\n if self.maxnorm is not None:\n # Ensure maxnorm constraints are initially satisfied\n entity_init = dense_maxnorm(entity_init, self.maxnorm)\n rel_init = dense_maxnorm(rel_init, self.maxnorm)\n self.entity_embedding_vars = tf.Variable(entity_init)\n self.rel_embedding_vars = tf.Variable(rel_init)\n # Embedding layer for each (head, rel, tail) triple being fed in as input\n head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)\n tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)\n rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)\n # Reshape rel_embed into square D x D matrices\n rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))\n # Reshape head_embed and tail_embed to be suitable for the matrix multiplication\n head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors\n tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors\n head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)\n # Output needs a squeeze into a 1d vector\n raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col)) \n self.output, self.loss = self._create_output_and_loss(raw_output)\n # Optimization\n self.train_step = self.opt.minimize(self.loss)\n if self.maxnorm is not None:\n # Post-processing to limit embedding vars to L2 ball\n rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult\n unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, 
self.tail_input]))[0]\n            unique_rel_indices = tf.unique(self.rel_input)[0]\n            entity_constraint = self._norm_constraint_op(self.entity_embedding_vars, \n                                                         unique_ent_indices, \n                                                         self.maxnorm)\n            rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, \n                                                      unique_rel_indices, \n                                                      rel_maxnorm)\n            self.post_step = [entity_constraint, rel_constraint]\n\n\nclass TransE(BaseModel):\n    ''' TransE: Translational Embeddings Model\n    \n    Score for (head i, rel k, tail j) triple is: d(e_i + t_k, e_j) \n    where e_i and e_j are D-dimensional embedding vectors for the head and\n    tail entities, t_k is another D-dimensional vector acting as a \n    translation, and d() is a dissimilarity function like Euclidean distance.\n    \n    Optimization is performed using SGD on ranking margin loss between \n    contrastive training pairs. Entity embeddings are constrained to lie within\n    the unit L2 ball, relationship vectors are left unconstrained.\n    \n    Args:\n        embedding_size: Embedding vector length\n        batch_pos_cnt: Number of positive examples to use in each mini-batch \n        max_iter: Maximum number of optimization iterations to perform\n        dist: Distance function used in loss:\n            'euclidean': sqrt(sum((x - y)^2))\n            'sqeuclidean': squared Euclidean, sum((x - y)^2)\n            'manhattan': sum of absolute differences, sum(|x - y|) \n        margin: Margin parameter for pairwise ranking hinge loss \n        opt: An optimizer object to use. If None, the default optimizer is\n            tf.train.AdagradOptimizer(1.0) \n    \n    References:\n        Bordes, Antoine, et al. \"Translating embeddings for modeling multi-relational \n        data.\" Advances in Neural Information Processing Systems. 2013.\n    '''\n    def __init__(self, embedding_size, batch_pos_cnt=100, \n                 max_iter=1000, dist='euclidean', \n                 margin=1.0, opt=None):\n        super(TransE, self).__init__(embedding_size=embedding_size,\n                                     maxnorm=1.0,\n                                     batch_pos_cnt=batch_pos_cnt,\n                                     max_iter=max_iter,\n                                     model_type='ranking_margin',\n                                     opt=opt)\n        self.dist = dist\n        self.margin = margin\n        self.EPS = 1e-3 # for sqrt gradient when dist='euclidean'\n    \n    def _create_model(self, train_triples):\n        # Count unique items to determine embedding matrix sizes\n        entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))\n        rel_cnt = len(set(train_triples[:,1]))\n        init_sd = 1.0 / np.sqrt(self.embedding_size)\n        # Embedding variables\n        entity_var_shape = [entity_cnt, self.embedding_size]\n        rel_var_shape = [rel_cnt, self.embedding_size]\n        entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd)\n        rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)\n        # Ensure maxnorm constraints are initially satisfied\n        entity_init = dense_maxnorm(entity_init, self.maxnorm)\n        self.entity_embedding_vars = tf.Variable(entity_init)\n        self.rel_embedding_vars = tf.Variable(rel_init)\n        # Embedding layer for each (head, rel, tail) triple being fed in as input\n        head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)\n        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)\n        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)\n        # Relationship vector acts as a translation in entity embedding space\n        diff_vec = tail_embed - (head_embed + rel_embed)\n        # negative dist so higher scores are better (important for pairwise loss)\n        if self.dist == 'manhattan':\n            raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)\n        elif self.dist == 'euclidean':\n            # +eps because gradients can misbehave for small values in sqrt\n            raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)\n        elif 
self.dist == 'sqeuclidean':\n raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)\n else:\n raise Exception('Unknown distance type')\n # Model output\n self.output, self.loss = ranking_margin_objective(raw_output, self.margin)\n # Optimization with postprocessing to limit embedding vars to L2 ball\n self.train_step = self.opt.minimize(self.loss)\n unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]\n self.post_step = self._norm_constraint_op(self.entity_embedding_vars, \n unique_ent_indices, \n self.maxnorm)"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.abs",
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.split",
"tensorflow.reduce_sum",
"tensorflow.Graph",
"tensorflow.expand_dims",
"tensorflow.mul",
"tensorflow.batch_matmul",
"tensorflow.assign",
"tensorflow.scatter_update",
"tensorflow.Session",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder",
"tensorflow.truncated_normal",
"tensorflow.unique",
"tensorflow.train.AdagradOptimizer",
"tensorflow.square",
"numpy.sqrt",
"tensorflow.log",
"tensorflow.nn.relu",
"tensorflow.maximum"
]
] |
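The models.py row above targets a legacy TensorFlow API (tf.mul, tf.batch_matmul, tf.initialize_all_variables), but its central regularization trick is framework-independent: rescale any embedding row whose Euclidean norm exceeds a threshold. A numpy sketch of the dense_maxnorm logic, for reference:

```python
import numpy as np

def dense_maxnorm_np(var_matrix, maxnorm=1.0):
    # Row-wise L2 norms; rows already inside the ball get scaling 1.0
    # because maxnorm / max(norm, maxnorm) == 1 when norm <= maxnorm.
    row_norms = np.sqrt(np.square(var_matrix).sum(axis=1))
    scaling = maxnorm / np.maximum(row_norms, maxnorm)
    return var_matrix * scaling[:, None]

E = np.array([[3.0, 4.0],   # norm 5.0 -> rescaled onto the unit ball
              [0.3, 0.4]])  # norm 0.5 -> left untouched
print(np.linalg.norm(dense_maxnorm_np(E), axis=1))  # [1.0, 0.5]
```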
aspuru-guzik-group/kraken | [
"4eaad505c1343e6083032b4a3fda47e004e19734"
] | [
"conf_selection_and_DFT/PL_dft_library_201027.py"
] | [
"# 201005: rename/restructure .yml files for consistency with xtb-level data\r\n# 201006: in read_conformer() fix error message when log files are missing \r\n\r\nimport os,re,itertools,time\r\n#import pybel\r\n#from openbabel import pybel\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pathlib as pl\r\ncwd = pl.Path.cwd()\r\nimport yaml\r\nfrom yaml import CLoader as Loader\r\nfrom yaml import CDumper as Dumper\r\nfrom rdkit import Chem,Geometry\r\nfrom rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops\r\nfrom multiprocessing import Pool\r\n\r\nimport morfeus # Kjell Jorner\r\nfrom PL_split_logs_201006 import split_log # TG\r\nfrom PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides # TG #changed from PL_conformer_selection_201019 5/17/21 by EP\r\nimport PL_gaussian_properties_201021 as gp # TG\r\nimport vmin4 as vmin # TG/Iris Guo\r\nimport P_int_200916 as P_int # Robert Pollice (,TG(,ML))\r\n# import PL_visvol as visvol # Ellyn Peters\r\n\r\n# covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197\r\n# values for metals decreased by 10% according to Robert Paton's Sterimol implementation\r\nrcov = {\r\n\"H\": 0.32,\"He\": 0.46,\"Li\": 1.2,\"Be\": 0.94,\"B\": 0.77,\"C\": 0.75,\"N\": 0.71,\"O\": 0.63,\"F\": 0.64,\"Ne\": 0.67,\"Na\": 1.4,\"Mg\": 1.25,\"Al\": 1.13,\"Si\": 1.04,\"P\": 1.1,\"S\": 1.02,\"Cl\": 0.99,\"Ar\": 0.96,\"K\": 1.76,\"Ca\": 1.54,\"Sc\": 1.33,\"Ti\": 1.22,\"V\": 1.21,\"Cr\": 1.1,\"Mn\": 1.07,\"Fe\": 1.04,\"Co\": 1.0,\"Ni\": 0.99,\"Cu\": 1.01,\"Zn\": 1.09,\"Ga\": 1.12,\"Ge\": 1.09,\"As\": 1.15,\"Se\": 1.1,\"Br\": 1.14,\"Kr\": 1.17,\"Rb\": 1.89,\"Sr\": 1.67,\"Y\": 1.47,\"Zr\": 1.39,\"Nb\": 1.32,\"Mo\": 1.24,\"Tc\": 1.15,\"Ru\": 1.13,\"Rh\": 1.13,\"Pd\": 1.08,\"Ag\": 1.15,\"Cd\": 1.23,\"In\": 1.28,\"Sn\": 1.26,\"Sb\": 1.26,\"Te\": 1.23,\"I\": 1.32,\"Xe\": 1.31,\"Cs\": 2.09,\"Ba\": 1.76,\"La\": 1.62,\"Ce\": 1.47,\"Pr\": 1.58,\"Nd\": 1.57,\"Pm\": 1.56,\"Sm\": 1.55,\"Eu\": 1.51,\"Gd\": 1.52,\"Tb\": 1.51,\"Dy\": 1.5,\"Ho\": 1.49,\"Er\": 1.49,\"Tm\": 1.48,\"Yb\": 1.53,\"Lu\": 1.46,\"Hf\": 1.37,\"Ta\": 1.31,\"W\": 1.23,\"Re\": 1.18,\"Os\": 1.16,\"Ir\": 1.11,\"Pt\": 1.12,\"Au\": 1.13,\"Hg\": 1.32,\"Tl\": 1.3,\"Pb\": 1.3,\"Bi\": 1.36,\"Po\": 1.31,\"At\": 1.38,\"Rn\": 1.42,\"Fr\": 2.01,\"Ra\": 1.81,\"Ac\": 1.67,\"Th\": 1.58,\"Pa\": 1.52,\"U\": 1.53,\"Np\": 1.54,\"Pu\": 1.55\r\n}\r\n\r\n# some constants\r\nR = 0.0019872036 #kcal mol^-1 K^-1\r\nT = 298.15 #K\r\nhartree_kcalmol = 627.50947 \r\n\r\nperiodictable = [\"Bq\",\"H\",\"He\",\"Li\",\"Be\",\"B\",\"C\",\"N\",\"O\",\"F\",\"Ne\",\"Na\",\"Mg\",\"Al\",\"Si\",\"P\",\"S\",\"Cl\",\"Ar\",\"K\",\"Ca\",\"Sc\",\"Ti\",\"V\",\"Cr\",\"Mn\",\"Fe\",\"Co\",\"Ni\",\"Cu\",\"Zn\",\"Ga\",\"Ge\",\"As\",\"Se\",\"Br\",\"Kr\",\"Rb\",\"Sr\",\"Y\",\"Zr\",\"Nb\",\"Mo\",\"Tc\",\"Ru\",\"Rh\",\"Pd\",\"Ag\",\"Cd\",\"In\",\"Sn\",\"Sb\",\"Te\",\"I\",\"Xe\",\"Cs\",\"Ba\",\"La\",\"Ce\",\"Pr\",\"Nd\",\"Pm\",\"Sm\",\"Eu\",\"Gd\",\"Tb\",\"Dy\",\"Ho\",\"Er\",\"Tm\",\"Yb\",\"Lu\",\"Hf\",\"Ta\",\"W\",\"Re\",\"Os\",\"Ir\",\"Pt\",\"Au\",\"Hg\",\"Tl\",\"Pb\",\"Bi\",\"Po\",\"At\",\"Rn\",\"Fr\",\"Ra\",\"Ac\",\"Th\",\"Pa\",\"U\",\"Np\",\"Pu\",\"Am\",\"Cm\",\"Bk\",\"Cf\",\"Es\",\"Fm\",\"Md\",\"No\",\"Lr\",\"Rf\",\"Db\",\"Sg\",\"Bh\",\"Hs\",\"Mt\",\"Ds\",\"Rg\",\"Uub\",\"Uut\",\"Uuq\",\"Uup\",\"Uuh\",\"Uus\",\"Uuo\",\"X\"]\r\n\r\ndef get_conmat(elements, coords): \r\n # partially based on code from Robert Paton's Sterimol script, which based this part on Grimme's D3 code\r\n # elements is a 
list of strings, coords is a numpy array or nested list of shape N_atoms x 3\r\n
    if type(coords) == list:\r\n        coords = np.asarray(coords)\r\n
    natom = len(elements)\r\n    #max_elem = 94\r\n    k1 = 16.0\r\n    k2 = 4.0/3.0\r\n
    conmat = np.zeros((natom,natom))\r\n
    for i in range(0,natom):\r\n        if elements[i] not in rcov.keys():\r\n            continue\r\n
        for iat in range(0,natom):\r\n            if elements[iat] not in rcov.keys():\r\n                continue\r\n
            if iat != i:\r\n                dxyz = coords[iat]-coords[i]\r\n                r = np.linalg.norm(dxyz)\r\n
                rco = rcov[elements[i]]+rcov[elements[iat]]\r\n                rco = rco*k2\r\n                rr = rco/r\r\n
                damp = 1.0/(1.0+np.exp(-k1*(rr-1.0)))\r\n
                if damp > 0.85: # check whether this threshold is good enough for general purposes\r\n
                    conmat[i,iat],conmat[iat,i] = 1,1\r\n    return(conmat)\r\n\r\n
def add_valence(elements,coords,conmat,base_idx,add_element=\"Pd\"):\r\n
    # Adds a valence to base so that the angle to the previous substituents is maximized and reorders the coordinate output for convenience\r\n
    # add_element: any of the elements in distpx below\r\n
    distpx = {\"O\":1.5,\"Se\":2.12,\"Pd\":2.28,\"X\":1.8} # typical bond distances to P\r\n
    if type(coords) == list:\r\n        coords = np.asarray(coords)\r\n
    num_atoms = len(elements)\r\n    coord_base = coords[base_idx]\r\n    base_element = elements[base_idx]\r\n
    vec = np.array([0.0,0.0,0.0])\r\n    bonded = []\r\n
    for atom in range(num_atoms):\r\n        if conmat[base_idx][atom]:\r\n            bonded.append(atom)\r\n            vec += coord_base - coords[atom]\r\n
    coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base\r\n
    atoms = [x for x in range(num_atoms+1)]\r\n    coords_temp = np.vstack((coords,coordox))\r\n
    if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:\r\n        print(\" Warning: possible collision!\")\r\n
    # sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next\r\n
    elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]\r\n
    coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))\r\n
    return(elements_new, coords_new)\r\n\r\n
def write_xyz(elements,coords,filename):\r\n
    with open(filename,\"w\") as f:\r\n        f.write(f\"{len(elements)}\\n\\n\")\r\n
        for i,a in enumerate(elements):\r\n            f.write(f\"{a.title():>3} \" + \" \".join([f\"{coords[i][j]:15f}\" for j in range(3)]) + \"\\n\")\r\n
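\r\n# Illustrative sketch (hypothetical four-atom input, not called anywhere in this script):\r\n
# how get_conmat() and add_valence() combine to place the Pd reference atom.\r\n
#   els = [\"P\",\"H\",\"H\",\"H\"]\r\n
#   xyz = np.array([[0.0,0.0,0.0],[1.3,0.0,0.0],[-0.6,1.1,0.0],[-0.6,-1.1,0.0]])\r\n
#   cm = get_conmat(els, xyz)                      # N_atoms x N_atoms connectivity matrix\r\n
#   els_pd, xyz_pd = add_valence(els, xyz, cm, 0)  # appends \"Pd\" opposite the P substituents\r\n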
\r\ndef rmsd_matrix(conformers):\r\n
    molobjects = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f\"{conformer}_opt.sdf\"),removeHs=False,strictParsing=False) for conformer in conformers]\r\n
    molobjects = [Chem.RemoveHs(mol) for mol in molobjects] # Remove all H: optional but speeds up RMSD calculation\r\n
    molobjects = [delete_haloalkane_halides(mol) for mol in molobjects] # Remove halides in perhaloalkyl moieties. Improves RMSD matching and timing\r\n
    molobjects_inv = [mirror_mol(mol) for mol in molobjects] # create mirror images of each conformer\r\n
    rmsd_mat = np.zeros((len(conformers),len(conformers)))\r\n
    for i,j in itertools.product(range(len(conformers)),range(len(conformers))):\r\n
        if i<j: continue\r\n
        if i==j:\r\n            rmsd_mat[i,j] = 1\r\n
        else:\r\n            rmsd_mat[i,j] = min((rdMolAlign.GetBestRMS(molobjects[i],molobjects[j]),rdMolAlign.GetBestRMS(molobjects[i],molobjects_inv[j])))\r\n
            rmsd_mat[j,i] = rmsd_mat[i,j]\r\n
    return(rmsd_mat)\r\n\r\n
def dict_key_rmsd(candidate_pair):\r\n    return float(rmsd_matrix(candidate_pair)[0,1])\r\n\r\n\r\n
# which energies to read from which log-file\r\n
energylogs = {\r\n\"e_dz\":\"freq\",\r\n\"e_tz_gas\":\"sp\",\r\n\"e_tz_solv\":\"solv\",\r\n\"e_tz_ra\":\"ra\",\r\n\"e_tz_rc\":\"rc\",\r\n}\r\n\r\n
# which properties to read from which log-file\r\n
proplogs = {\r\n\"freq\":[\"nimag\",\"g\",\"t\"],\r\n\"sp\"  :[\"dipole\",\"homo\",\"qpole\",\"t\"],\r\n\"ra\"  :[\"homo\",\"nbo\",\"t\"],\r\n\"rc\"  :[\"homo\",\"nbo\",\"t\"],\r\n\"nbo\" :[\"nbo\",\"nborbsP\",\"t\"],\r\n\"nmr\" :[\"nmr\",\"t\"],\r\n\"efg\" :[\"efg\",\"nuesp\",\"t\"],\r\n\"solv\":[\"ecds\",\"t\"],\r\n}\r\n\r\n
# assign names to each descriptor\r\n
propoutput = {\r\n\"freq_g\": [\"\",\"g\"],\r\n\"freq_nimag\": [\"nimag\"],\r\n\"sp_dipole\": [\"dipolemoment\",],\r\n\"sp_homo\": [\"fmo_e_homo\",\"fmo_e_lumo\",\"fmo_mu\",\"fmo_eta\",\"fmo_omega\"],\r\n\"ra_homo\":[\"somo_ra\",\"\",\"\",\"\",\"\"],\r\n\"rc_homo\":[\"somo_rc\",\"\",\"\",\"\",\"\"],\r\n\"sp_qpole\": [\"qpole_amp\",\"qpoletens_xx\",\"qpoletens_yy\",\"qpoletens_zz\"],\r\n\"nbo_nbo\": [\"nbo_P\"],\r\n\"ra_nbo\": [\"nbo_P_ra\",\"spindens_P_ra\"],\r\n\"rc_nbo\": [\"nbo_P_rc\",\"spindens_P_rc\"],\r\n\"nmr_nmr\": [\"nmr_P\",\"nmrtens_sxx_P\",\"nmrtens_syy_P\",\"nmrtens_szz_P\",],\r\n\"efg_efg\": [\"efg_amp_P\",\"efgtens_xx_P\",\"efgtens_yy_P\",\"efgtens_zz_P\"],\r\n\"efg_nuesp\": [\"nuesp_P\",],\r\n\"solv_ecds\": [\"E_solv_cds\"],\r\n\"nbo_dipole\": [\"dipolemoment\",],\r\n\"nbo_homo\": [\"fmo_e_homo\",\"fmo_e_lumo\",\"fmo_mu\",\"fmo_eta\",\"fmo_omega\"],\r\n\"nbo_qpole\": [\"qpole_amp\",\"qpoletens_xx\",\"qpoletens_yy\",\"qpoletens_zz\"],\r\n}\r\n\r\n
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',\"Pint_P_int\",\"Pint_dP\",\"Pint_P_min\",\"Pint_P_max\",\"volume\",\"surface_area\",\"sphericity\"] # \"vv_total_visible_volume\",\"vv_proximal_visible_volume\",\"vv_distal_visible_volume\",\"vv_ratio_visible_total\",\"vv_ratio_proxvis_total\",\r\n
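\r\n# Note on the Boltzmann averaging performed in read_ligand() below: conformer i gets the weight\r\n
# w_i = exp(-dG_i/(R*T)) / Q with Q = sum_j exp(-dG_j/(R*T)), where dG_i is the relative free\r\n
# energy (g_tz_gas) in kcal/mol and R*T ~ 0.593 kcal/mol at 298.15 K. Toy example with made-up\r\n
# energies dG = [0.0, 0.5] kcal/mol, giving w ~ [0.70, 0.30]:\r\n
#   w = np.exp(-np.array([0.0, 0.5])/(R*T)); w = w/w.sum()\r\n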
\"vv_total_visible_volume\",\"vv_proximal_visible_volume\",\"vv_distal_visible_volume\",\"vv_ratio_visible_total\",\"vv_ratio_proxvis_total\",\r\n\r\nmmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,\"vv_total_visible_volume\",\"vv_proximal_visible_volume\",\"vv_distal_visible_volume\",\"vv_ratio_visible_total\",\"vv_ratio_proxvis_total\",\r\n\r\nPintresults = [\"Pint_P_int\",\"Pint_dP\",\"Pint_P_min\",\"Pint_P_max\",\"volume\",\"surface_area\",\"sphericity\"]\r\n\r\ndef morfeus_properties(elements,coordinates,confdata):\r\n # Morfeus: Sterimol, Vbur, pyr\r\n morfdict = {}\r\n if \"pyr_P\" not in confdata.keys() and confdata[\"p_val\"] == 3:\r\n # Pyramidalization - two equivalent measurments P and alpha \r\n pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd\r\n morfdict[\"pyr_P\"] = float(pyr.P)\r\n morfdict[\"pyr_alpha\"] = float(pyr.alpha)\r\n\r\n if \"vbur_vbur\" not in confdata.keys():\r\n #Buried volume - get quadrant volumes and distal volume \r\n # iterate through P-substituents, aligning the quadrants paralell to each once (= xz_plane definition)\r\n # Metal/point of reference should be 2.28 A away from P\r\n # z_axis_atoms: P \r\n # xz_plane_atoms: each of the substituents once\r\n # keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system\r\n # keep highest difference of any neighboring quadrant volume\r\n # keep volume in each of the two hemispheres \r\n\r\n qvbur_all = np.array([])\r\n qvdist_all = np.array([])\r\n qvtot_all = np.array([])\r\n max_delta_qvbur_all = []\r\n max_delta_qvtot_all = []\r\n ovbur_all = np.array([])\r\n ovtot_all = np.array([])\r\n \r\n for i in range(3):#confdata[\"p_val\"]): \r\n bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i]) \r\n bv.octant_analysis()\r\n bv.compute_distal_volume(method=\"buried_volume\",octants=True)\r\n\r\n vbur = bv.buried_volume # these are identical for each iteration\r\n vdist = bv.distal_volume # \r\n vtot = vbur + vdist # \r\n\r\n qvbur = np.asarray(list(bv.quadrants[\"buried_volume\"].values()))\r\n qvdist = np.asarray(list(bv.quadrants[\"distal_volume\"].values()))\r\n qvtot = qvbur + qvdist\r\n\r\n qvbur_all = np.append(qvbur_all,qvbur)\r\n qvtot_all = np.append(qvtot_all,qvtot)\r\n\r\n max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))\r\n max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))\r\n\r\n ovbur = np.asarray(list(bv.octants[\"buried_volume\"].values()))\r\n ovdist = np.asarray(list(bv.octants[\"distal_volume\"].values()))\r\n ovtot = ovbur + ovdist\r\n\r\n ovbur_all = np.append(ovbur_all,ovbur)\r\n ovtot_all = np.append(ovtot_all,ovtot)\r\n\r\n near_vbur = ovbur[4:].sum() # these are identical for each iteration\r\n far_vbur = ovbur[:4].sum() # \r\n near_vtot = ovtot[4:].sum() # \r\n far_vtot = ovtot[:4].sum() # \r\n\r\n morfdict[\"vbur_vbur\"] = vbur\r\n morfdict[\"vbur_vtot\"] = float(vtot)\r\n morfdict[\"vbur_ratio_vbur_vtot\"] = 
float(vbur/vtot)\r\n\r\n morfdict[\"vbur_qvbur_min\"] = float(min(qvbur_all))\r\n morfdict[\"vbur_qvbur_max\"] = float(max(qvbur_all))\r\n morfdict[\"vbur_qvtot_min\"] = float(min(qvtot_all))\r\n morfdict[\"vbur_qvtot_max\"] = float(max(qvtot_all))\r\n\r\n morfdict[\"vbur_max_delta_qvbur\"] = float(max(max_delta_qvbur_all))\r\n morfdict[\"vbur_max_delta_qvtot\"] = float(max(max_delta_qvtot_all))\r\n\r\n morfdict[\"vbur_ovbur_min\"] = float(min(ovbur_all))\r\n morfdict[\"vbur_ovbur_max\"] = float(max(ovbur_all))\r\n morfdict[\"vbur_ovtot_min\"] = float(min(ovtot_all))\r\n morfdict[\"vbur_ovtot_max\"] = float(max(ovtot_all))\r\n\r\n morfdict[\"vbur_near_vbur\"] = float(near_vbur)\r\n morfdict[\"vbur_far_vbur\"] = float(far_vbur)\r\n morfdict[\"vbur_near_vtot\"] = float(near_vtot)\r\n morfdict[\"vbur_far_vtot\"] = float(far_vtot)\r\n\r\n if \"sterimol_B1\" not in confdata.keys():\r\n # Sterimol\r\n # for Sterimol values matching Rob Paton's implementation:\r\n patonradii = morfeus.helpers.get_radii(elements, radii_type=\"bondi\")\r\n patonradii = np.array(patonradii)\r\n patonradii[patonradii == 1.2] = 1.09\r\n\r\n sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)\r\n morfdict[\"sterimol_B1\"] = float(sterimol.B_1_value)\r\n morfdict[\"sterimol_B5\"] = float(sterimol.B_5_value)\r\n morfdict[\"sterimol_L\"] = float(sterimol.L_value)\r\n # buried Sterimol\r\n sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)\r\n sterimol_bur.bury(sphere_radius=5.5,method=\"delete\",radii_scale=0.5) \r\n # sterimol.bury(sphere_radius=4.5,method=\"delete\",radii_scale=1) \r\n morfdict[\"sterimol_burB1\"] = float(sterimol_bur.B_1_value)\r\n morfdict[\"sterimol_burB5\"] = float(sterimol_bur.B_5_value)\r\n morfdict[\"sterimol_burL\"] = float(sterimol_bur.L_value)\r\n\r\n return(morfdict)\r\n\r\ndef gp_properties(ligand,conformer,p_idx):\r\n # reads gaussian log files\r\n gpdict = {} \r\n gpdict[\"properties\"] = {}\r\n contents = {\r\n \"streams\":{},\r\n \"filecont\":{},\r\n }\r\n # read energies\r\n for e,log in energylogs.items():\r\n contents[\"streams\"][log] = gp.get_outstreams(cwd/conformer/f\"{conformer}_{log}.log\")\r\n if contents[\"streams\"][log] == \"failed or incomplete job\":\r\n return({\"error\":True})\r\n else:\r\n gpdict[e] = gp.get_e_hf(contents[\"streams\"][log])\r\n gpdict[\"error\"] = False\r\n # going through each log file, get the relevant properties\r\n for log in proplogs.keys():\r\n contents[\"filecont\"][log] = gp.get_filecont(cwd/conformer/f\"{conformer}_{log}.log\")\r\n for prop in proplogs[log]:\r\n gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)\r\n if prop == \"nborbsP\": # NBO orbital analysis returns a dictionary with the proper labels \r\n gpdict[\"properties\"].update(gpresults)\r\n elif prop == \"t\": # subjob time\r\n gpdict[f\"{log}_t\"] = gpresults\r\n elif prop in [\"e_dz\",\"g\",\"e_tz_gas\",\"e_tz_solv\",\"e_tz_ra\",\"e_tz_rc\",\"nimag\"]:\r\n gpdict.update({propoutput[f\"{log}_{prop}\"][i]: float(gpresults[i]) for i in range(len(gpresults))})\r\n else: # all other functions return a list. 
This is assigned into a dict with proper names here\r\n
            gpdict[\"properties\"].update({propoutput[f\"{log}_{prop}\"][i]: float(gpresults[i]) for i in range(len(gpresults))})\r\n\r\n
    gpdict[\"g_tz_gas\"] = gpdict[\"g\"] - gpdict[\"e_dz\"] + gpdict[\"e_tz_gas\"] # in Hartree\r\n
    gpdict[\"g_tz_solv\"] = gpdict[\"g\"] - gpdict[\"e_dz\"] + gpdict[\"e_tz_solv\"] # in Hartree\r\n
    gpdict[\"properties\"][\"E_solv_total\"] = (gpdict[\"e_tz_solv\"] - gpdict[\"e_tz_gas\"]) * hartree_kcalmol # in kcal/mol\r\n
    gpdict[\"properties\"][\"E_solv_elstat\"] = gpdict[\"properties\"][\"E_solv_total\"] - gpdict[\"properties\"][\"E_solv_cds\"] # in kcal/mol\r\n
    gpdict[\"properties\"][\"E_oxidation\"] = gpdict[\"e_tz_rc\"] - gpdict[\"e_tz_gas\"] # in Hartree\r\n
    gpdict[\"properties\"][\"E_reduction\"] = gpdict[\"e_tz_ra\"] - gpdict[\"e_tz_gas\"] # in Hartree\r\n
    gpdict[\"properties\"][\"fukui_p\"] = gpdict[\"properties\"][\"nbo_P\"]-gpdict[\"properties\"][\"nbo_P_ra\"] # fukui electrophilicity\r\n
    gpdict[\"properties\"][\"fukui_m\"] = gpdict[\"properties\"][\"nbo_P_rc\"]-gpdict[\"properties\"][\"nbo_P\"] # fukui nucleophilicity\r\n
    gpdict[\"t_total\"] = sum([gpdict[f\"{log}_t\"] for log in proplogs.keys()])\r\n
    if \"\" in gpdict.keys():\r\n        del gpdict[\"\"]\r\n
    if \"\" in gpdict[\"properties\"].keys():\r\n        del gpdict[\"properties\"][\"\"]\r\n
    return(gpdict)\r\n\r\n
def read_conformer(cwd, ligand, conformer): # cwd: pathlib path of the current working directory. ligand: 8-digit ligand ID. conformer: full name of the conformer (including the ligand ID at the beginning)\r\n
    confdata = {}\r\n
    errors = []\r\n
    checklogs = [cwd/conformer/f\"{conformer}_{l}.log\" for l in proplogs.keys() if not (cwd/conformer/f\"{conformer}_{l}.log\").exists()]\r\n
    if len(checklogs) != 0:\r\n
        #! log this as a conformer-level error\r\n
        err = f\"Missing Gaussian log files, flagged in read_conformer: {','.join([chkl.name for chkl in checklogs])}\"\r\n
        errors.append(err)\r\n
        print(f\"{ligand};{conformer};{err}\")\r\n
        with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n
            f.write(f\"{ligand};{conformer};{err}\\n\")\r\n
        confdata[\"error\"] = True\r\n
        return(confdata,errors)\r\n\r\n
    if \"elements_pd\" not in confdata.keys():\r\n
        # mol = next(pybel.readfile(\"g09\",str(cwd/conformer/f\"{conformer}_nbo.log\")))\r\n
        #mol = next(pybel.readfile(\"g09\",str(cwd/conformer/f\"{conformer}_opt.log\")))\r\n
        #elements = [periodictable[a.atomicnum] for a in mol.atoms]\r\n
        #coordinates = [list(a.coords) for a in mol.atoms]\r\n
        #coordinates_a = np.array([a.coords for a in mol.atoms])\r\n\r\n
        def read_gaussian_logfile(fn):\r\n
            time0 = time.time()\r\n
            read = False\r\n
            for line in open(fn,\"r\"):\r\n
                if read:\r\n
                    if \"---\" in line and len(elements)>0:\r\n
                        read = False\r\n
                    if read:\r\n
                        if \"X\" not in line and \"---\" not in line:\r\n
                            atomnum = int(line.split()[1])\r\n
                            el = periodictable[atomnum]\r\n
                            elements.append(el)\r\n
                            coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])\r\n
                if \"Coordinates (Angstroms)\" in line:\r\n
                    coordinates, elements = [], []\r\n
                    read = True\r\n
            time1 = time.time()\r\n
            print(\"gaussian log parser done in %.2f seconds\"%(time1-time0))\r\n
            return(coordinates, elements)\r\n\r\n
        coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f\"{conformer}_opt.log\"))\r\n
        coordinates_a = np.array(coordinates)\r\n\r\n
        conmat = get_conmat(elements,coordinates_a)\r\n
        p_idx = [i for i in range(len(elements)) if elements[i] == \"P\" and sum(conmat[i]) <= 3][0] # this 
removes quaternary P (phosphonium, phosphate etc) but allows for P with 2 substituents (phosphabenzene, phosphaimine etc). Can we be sure that we never have more than one non-quaternary P(III)? \r\n elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element=\"Pd\") # Add \"Pd\" at the reference position in the P-lone pair region\r\n if not (cwd/conformer/f\"{conformer}_opt_Pd.xyz\").exists():\r\n #out = pybel.Outputfile(\"xyz\",str(cwd/conformer/f\"{conformer}_opt.xyz\"))\r\n #out.write(mol)\r\n #out.close()\r\n write_xyz(elements, coordinates, cwd/conformer/f\"{conformer}_opt.xyz\")\r\n #out = pybel.Outputfile(\"sdf\",str(cwd/conformer/f\"{conformer}_opt.sdf\"))\r\n #out.write(mol)\r\n #out.close()\r\n os.system(\"obabel -ixyz %s -osdf >> %s\"%(str(cwd/conformer/f\"{conformer}_opt.xyz\"), str(cwd/conformer/f\"{conformer}_opt.sdf\")))\r\n write_xyz(elements_pd,coordinates_pd,cwd/conformer/f\"{conformer}_opt_Pd.xyz\")\r\n confdata[\"coords\"] = coordinates\r\n confdata[\"coords_pd\"] = coordinates_pd.tolist()\r\n confdata[\"elements\"] = elements\r\n confdata[\"elements_pd\"] = elements_pd\r\n confdata[\"conmat\"] = conmat.tolist()\r\n confdata[\"p_idx\"] = p_idx\r\n confdata[\"p_val\"] = int(sum(conmat[p_idx])) # how many substituents at P\r\n\r\n confdata[\"properties\"] = {}\r\n ## get properties\r\n # gp_properties: everything that can be read from the Gaussian log files (most electronic properties)\r\n confdata.update(gp_properties(ligand,conformer,confdata[\"p_idx\"]))\r\n if confdata[\"error\"]:\r\n #! log this as a conformer-level error\r\n err = \"Error in the Gaussian computations, flagged in read_conformer, please check log files.\"\r\n errors.append(err)\r\n print(f\"{ligand};{conformer};{err}\")\r\n with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n f.write(f\"{ligand};{conformer};{err}\\n\")\r\n with open(cwd/conformer/f\"{conformer}_data.yml\",\"w\") as f:\r\n yaml.dump(confdata,f,Dumper=Dumper)\r\n return(confdata,errors)\r\n\r\n if confdata[\"nimag\"] != 0:\r\n #! 
log this as a conformer-level error\r\n err = f\"Number of imaginary frequencies: {confdata['nimag']}.\"\r\n errors.append(err)\r\n print(f\"{ligand};{conformer};{err}\")\r\n with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n f.write(f\"{ligand};{conformer};{err}\\n\")\r\n with open(cwd/conformer/f\"{conformer}_data.yml\",\"w\") as f:\r\n yaml.dump(confdata,f,Dumper=Dumper)\r\n confdata[\"error\"] = True\r\n return(confdata,errors)\r\n\r\n # morfeus: properties that use the geometry/steric properties\r\n confdata[\"properties\"].update(morfeus_properties(confdata[\"elements_pd\"],confdata[\"coords_pd\"],confdata))\r\n\r\n # # P_int\r\n # if \"Pint_P_int\" not in confdata.keys():\r\n # confdata.update(P_int.P_int_main(name=conformer,directory=cwd/conformer))\r\n # read results\r\n disp = \"d3\"\r\n pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)\r\n confdata[\"properties\"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})\r\n \r\n # V_min\r\n try:\r\n if \"vmin_vmin\" not in confdata.keys():\r\n vminob = vmin.get_vmin(f\"{conformer}.fchk\",str(cwd/conformer)+\"/\",True)\r\n confdata[\"properties\"][\"vmin_vmin\"] = float(vminob.v_min)\r\n confdata[\"properties\"][\"vmin_r\"] = float(vminob.r_min)\r\n except:\r\n err = f\"Vmin FileNotFoundError.\"\r\n errors.append(err)\r\n print(f\"{ligand};{conformer};{err}\")\r\n with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n f.write(f\"{ligand};{conformer};{err}\\n\")\r\n confdata[\"error\"] = True\r\n\r\n # visvol\r\n # if \"vv_total_visible_volume\" not in confdata.keys():\r\n # confdata.update(visvol.get_vis_vol(cwd/conformer/f\"{conformer}_opt_Pd.xyz\",radii_type = 'rcov',prox_cutoff = 3.5,ignore_H = 0,write_results = 1, plot = 0))\r\n\r\n with open(cwd/conformer/f\"{conformer}_data.yml\",\"w\") as f:\r\n yaml.dump(confdata,f,Dumper=Dumper)\r\n\r\n return(confdata,errors)\r\n\r\ndef read_ligand(cwd, ligand, conformers, liganddata = {}): # cwd is the ligand-level directory\r\n status = {\"ligandlevel\": [],}\r\n if len(liganddata.keys()) == 0:\r\n if (cwd/f\"{ligand}_data.yml\").exists():\r\n with open(cwd/f\"{ligand}_data.yml\",\"r\") as f:\r\n liganddata = yaml.load(f,Loader=Loader)\r\n if (cwd/f\"{ligand}_confdata.yml\").exists():\r\n with open(cwd/f\"{ligand}_confdata.yml\",\"r\") as f:\r\n liganddata[\"confdata\"] = yaml.load(f,Loader=Loader)\r\n \r\n else:\r\n liganddata = {\r\n \"conformers_all\": conformers, \r\n \"conformers\": conformers.copy(), # Duplicates and computations with errors (including nimag=1) will be removed from this list \r\n \"number_of_conformers\": len(conformers),\r\n \"removed_duplicates\": [],\r\n \"confdata\": {},#{c:{} for c in conformers},\r\n \"boltzmann_averaged_data\": {},\r\n \"min_data\": {},\r\n \"max_data\": {},\r\n \"delta_data\": {},\r\n \"vburminconf_data\": {},\r\n }\r\n\r\n newconfs = 0\r\n for conformer in conformers:\r\n if conformer in liganddata[\"removed_duplicates\"]:\r\n continue\r\n\r\n print(conformer)\r\n if conformer in liganddata[\"confdata\"].keys():\r\n pass\r\n elif (cwd/conformer/f\"{conformer}_data.yml\").exists():\r\n with open(cwd/conformer/f\"{conformer}_data.yml\",\"r\") as f:\r\n liganddata[\"confdata\"][conformer] = yaml.load(f,Loader=Loader)\r\n newconfs += 1\r\n else:\r\n print(\"read conformer data\")\r\n liganddata[\"confdata\"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer) # returns the dictionary with the conformer data 
and a list with errors\r\n newconfs += 1\r\n\r\n if newconfs > 0:\r\n # error, NIMAG removal\r\n liganddata[\"conformers_w_error\"] = [conformer for conformer in liganddata[\"conformers\"] if liganddata[\"confdata\"][conformer][\"error\"]]\r\n liganddata[\"conformers\"] = [c for c in liganddata[\"conformers\"] if c not in liganddata[\"conformers_w_error\"]]\r\n liganddata[\"number_of_conformers\"] = len(liganddata[\"conformers\"])\r\n energies = [\"e_dz\",\"g\",\"e_tz_gas\",\"g_tz_gas\",\"e_tz_solv\",\"g_tz_solv\"]\r\n liganddata[\"energies\"] = {}\r\n liganddata[\"relative_energies\"] = {}\r\n for e in energies:\r\n liganddata[\"energies\"][e] = {conformer: liganddata[\"confdata\"][conformer][e] for conformer in liganddata[\"conformers\"]}\r\n liganddata[e+\"_min\"] = min(liganddata[\"energies\"][e].values())\r\n liganddata[e+\"_minconf\"] = list(liganddata[\"energies\"][e].keys())[np.argmin(list(liganddata[\"energies\"][e].values()))]\r\n liganddata[\"relative_energies\"][e+\"_rel\"] = {conformer: (liganddata[\"energies\"][e][conformer]-liganddata[e+\"_min\"])*hartree_kcalmol for conformer in liganddata[\"conformers\"]}\r\n\r\n # erel_df = pd.DataFrame(np.array([list(liganddata[e+\"_rel\"].values()) for e in energies]).T ,columns=energies,index=liganddata[\"conformers\"] )\r\n erel_df = pd.DataFrame([liganddata[\"relative_energies\"][e+\"_rel\"] for e in energies],index=energies).T\r\n #liganddata[\"relative_energies_df\"] = erel_df\r\n liganddata[\"relative_energies_dict\"] = erel_df.to_dict()\r\n\r\n # Find duplicates: \r\n # 1) find pairs of conformers that are within E_rel < 0.1 kcal/mol (relative energies seem to be much more reliable than relative free energies)\r\n # 2) check these pairs to also have RMSD < 0.2 A \r\n # 3) Remove the conformer with higher relative free energy\r\n duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata[\"conformers\"],2) if abs(erel_df[\"e_dz\"].loc[i] - erel_df[\"e_dz\"].loc[j]) < 0.1]\r\n try:\r\n # Throw a name error here if you wanna only run the except\r\n cores = max(os.cpu_count() - 2, 1)\r\n with Pool(cores) as p:\r\n values = p.map(dict_key_rmsd, duplicates_candidates)\r\n\r\n liganddata[\"rmsd_candidates\"] = {key: value for key, value in zip(duplicates_candidates, values)}\r\n\r\n # The less cool, non-parallel way\r\n #liganddata[\"rmsd_candidates\"] = {candidate_pair: float(rmsd_matrix(candidate_pair)[0,1]) for candidate_pair in duplicates_candidates} # keep all RMSD for potential debugging\r\n liganddata[\"duplicates\"] = [candidate_pair for candidate_pair in liganddata[\"rmsd_candidates\"] if liganddata[\"rmsd_candidates\"][candidate_pair] < 0.2] \r\n \r\n except: # RDkit failed to generate Mol objects and thus could not compute RMSD, or some of the internal structures in those mol files are different despite actually being the same. Default to duplicate detection based on dipole moment and chemical shift similarity\r\n #! log this on ligand level for double-checking\r\n err = \"Warning: RDKit error at duplicate RMSD testing. 
Please double check.\"\r\n status[\"ligandlevel\"].append(err)\r\n print(f\"{ligand};ligandlevel;{err}\")\r\n with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n f.write(f\"{ligand};ligandlevel;{err}\\n\")\r\n \r\n dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata[\"confdata\"][i][\"properties\"][\"dipolemoment\"] - liganddata[\"confdata\"][j][\"properties\"][\"dipolemoment\"]) < 0.025])\r\n nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata[\"confdata\"][i][\"properties\"][\"nmr_P\"] - liganddata[\"confdata\"][j][\"properties\"][\"nmr_P\"]) < 0.1])\r\n liganddata[\"duplicates\"] = sorted(dipole_candidates & nmr_candidates)\r\n\r\n liganddata[\"removed_duplicates\"] = [erel_df.loc[list(pair)][\"g_tz_gas\"].idxmax() for pair in liganddata[\"duplicates\"]]\r\n liganddata[\"conformers\"] = [c for c in liganddata[\"conformers\"] if c not in liganddata[\"removed_duplicates\"]]\r\n liganddata[\"number_of_conformers\"] = len(liganddata[\"conformers\"])\r\n\r\n # Boltzmann averaging \r\n #boltzfacs = {conformer: np.exp(-liganddata[\"relative_energies_df\"][\"g_tz_gas\"].loc[conformer]/(R*T)) for conformer in liganddata[\"conformers\"]}\r\n boltzfacs = {conformer: np.exp(-erel_df[\"g_tz_gas\"].loc[conformer]/(R*T)) for conformer in liganddata[\"conformers\"]}\r\n\r\n Q = sum(boltzfacs.values())\r\n liganddata[\"boltzmann_weights\"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata[\"conformers\"] } # probability\r\n for prop in boltzproperties:\r\n confsmissingprop = [conf for conf in liganddata[\"conformers\"] if prop not in liganddata[\"confdata\"][conf][\"properties\"].keys()]\r\n if len(confsmissingprop) == 0:\r\n liganddata[\"boltzmann_averaged_data\"][prop] = sum([liganddata[\"boltzmann_weights\"][conf] * liganddata[\"confdata\"][conf][\"properties\"][prop] for conf in liganddata[\"conformers\"]])\r\n else: # if a single conformer is missing a property value, set Boltzmann-average to None\r\n #! 
log this as a ligand-level error with prop and confsmissingprop\r\n err = f\"Warning: {len(confsmissingprop)}/{len(liganddata['conformers'])} conformers missing values for property {prop}: {','.join(confsmissingprop)}.\"\r\n status[\"ligandlevel\"].append(err)\r\n print(f\"{ligand};ligandlevel;{err}\")\r\n with open(cwd/f\"{ligand}_errors.txt\",\"a\") as f:\r\n f.write(f\"{ligand};ligandlevel;{err}\\n\")\r\n liganddata[\"boltzmann_averaged_data\"][prop] = None\r\n continue\r\n\r\n # \"Condensed\" properties\r\n liganddata[\"vburminconf\"] = liganddata[\"conformers\"][np.argmin([liganddata[\"confdata\"][conf][\"properties\"][\"vbur_vbur\"] for conf in liganddata[\"conformers\"]])]\r\n for prop in mmproperties:\r\n proplist = [liganddata[\"confdata\"][conf][\"properties\"][prop] for conf in liganddata[\"conformers\"] if prop in liganddata[\"confdata\"][conf][\"properties\"].keys()] \r\n # if a single conformer is missing a property value, still perform min/max analysis (Boltzmann-average will be None to indicate missing value(s))\r\n # if all confs are missing this prop, set min/max/delta to None\r\n if len(proplist) == 0:\r\n liganddata[\"min_data\"][prop] = None\r\n liganddata[\"max_data\"][prop] = None\r\n liganddata[\"delta_data\"][prop] = None\r\n liganddata[\"vburminconf_data\"][prop] = None\r\n else:\r\n liganddata[\"min_data\"][prop] = min(proplist)\r\n liganddata[\"max_data\"][prop] = max(proplist)\r\n liganddata[\"delta_data\"][prop] = liganddata[\"max_data\"][prop] - liganddata[\"min_data\"][prop]\r\n liganddata[\"vburminconf_data\"][prop] = liganddata[\"confdata\"][liganddata[\"vburminconf\"]][\"properties\"][prop]\r\n \r\n liganddata[\"time_all\"] = sum([liganddata[\"confdata\"][conf][\"t_total\"] for conf in liganddata[\"conformers_all\"] if \"t_total\" in liganddata[\"confdata\"][conf].keys()])\r\n\r\n with open(cwd/f\"{ligand}_data.yml\",\"w\") as f:\r\n yaml.dump({k:v for k,v in liganddata.items() if k != \"confdata\"},f,Dumper=Dumper)\r\n with open(cwd/f\"{ligand}_confdata.yml\",\"w\") as f:\r\n yaml.dump(liganddata[\"confdata\"],f,Dumper=Dumper)\r\n erel_df.to_csv(cwd/f\"{ligand}_relative_energies.csv\",sep=\";\")\r\n\r\n return(liganddata,status)\r\n\r\n\r\ndef main_split_logs(cwd, ligand):\r\n if not (cwd/\"ERR\").exists():\r\n (cwd/\"ERR\").mkdir()\r\n # if not (cwd/\"done\").exists():\r\n # (cwd/\"done\").mkdir() \r\n conformers = [i.name for i in (cwd/ligand).iterdir() if i.is_dir()]\r\n conformers_good = []\r\n for conformer in conformers:\r\n logs = [i.name for i in (cwd/ligand/conformer).rglob(\"*.log\")]\r\n if f\"{conformer}.log\" in logs and f\"{conformer}_opt.log\" not in logs:\r\n status = split_log(ligand, conformer)\r\n if status != \"Error\":\r\n #(cwd/ligand/conformer/f\"{conformer}.log\").rename(cwd/f\"done/{conformer}.log\")\r\n conformers_good.append(conformer)\r\n return(conformers_good)\r\n\r\nif __name__ == '__main__': \r\n starttime_all = time.time()\r\n\r\n ligname = re.compile(\"[0-9]{8}\") \r\n ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])\r\n conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}\r\n\r\n if not (cwd/\"ERR\").exists():\r\n (cwd/\"ERR\").mkdir()\r\n if not (cwd/\"done\").exists():\r\n (cwd/\"done\").mkdir() \r\n\r\n for ligand in ligands:\r\n for conformer in conformers[ligand]:\r\n logs = [i.name for i in (cwd/ligand/conformer).rglob(\"*.log\")]\r\n if f\"{conformer}.log\" in logs and f\"{conformer}_opt.log\" not in logs:\r\n status = 
split_log(ligand,conformer)\r\n
                if status != \"Error\":\r\n
                    (cwd/ligand/conformer/f\"{conformer}.log\").rename(cwd/f\"done/{conformer}.log\")\r\n\r\n
    if (cwd/\"allligands_data.yml\").exists():\r\n
        with open(cwd/\"allligands_data.yml\",\"r\") as f:\r\n
            allliganddata = yaml.load(f,Loader=Loader)\r\n
    else:\r\n
        allliganddata = {}\r\n\r\n
    for ligand in ligands:\r\n
        print(ligand)\r\n
        print(conformers[ligand])\r\n
        if ligand in allliganddata.keys():\r\n
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])\r\n
        else:\r\n
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])\r\n\r\n
    with open(cwd/\"allligands_data.yml\",\"w\") as f:\r\n
        yaml.dump(allliganddata,f,Dumper=Dumper)\r\n\r\n
    variants = [\"boltz\",\"min\",\"max\",\"delta\",\"vburminconf\"]\r\n
    variantkeys = {\"boltz\":\"boltzmann_averaged_data\",\"min\":\"min_data\",\"max\":\"max_data\",\"delta\":\"delta_data\",\"vburminconf\":\"vburminconf_data\"}\r\n
    columns = [i+\"_boltz\" for i in boltzproperties if i not in mmproperties] + [f\"{i}_{j}\" for i,j in itertools.product(mmproperties,variants)] # + [\"t_total\",\"number_of_conformers\"]\r\n
    df = pd.DataFrame(columns = columns,index = ligands)\r\n
    for l in ligands:\r\n
        for c in columns:\r\n
            prop,variant = c.rsplit(\"_\",1) # column names follow the pattern <property>_<variant>\r\n
            df.loc[l,c] = allliganddata[l][variantkeys[variant]][prop]\r\n
    df[\"t_total\"] = [allliganddata[l][\"time_all\"] for l in ligands] # read_ligand() stores the summed walltime as \"time_all\"\r\n
    df[\"number_of_conformers\"] = [allliganddata[l][\"number_of_conformers\"] for l in ligands]\r\n
    df.to_csv(\"allligands_data.csv\",sep=\";\")\r\n\r\n
    print(f\"All done. Total time: {round((time.time()-starttime_all),2)} sec\")\r\n"
] | [
[
"numpy.vstack",
"numpy.append",
"numpy.zeros",
"numpy.argmin",
"pandas.DataFrame",
"numpy.asarray",
"numpy.exp",
"numpy.math.exp",
"numpy.array",
"numpy.linalg.norm"
]
] |
dptam/text-to-text-transfer-transformer | [
"3662823b126ebf39d9d8ed147a8af0c6973f0ba9"
] | [
"t5/seqio/dataset_providers.py"
] | [
"# Copyright 2021 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Classes for data loading and processing.\n\nDefines Tasks, TaskRegistry, Mixture, and MixtureRegistry\n\"\"\"\n\nimport abc\nimport collections\nimport inspect\nimport json\nimport os\nimport re\nfrom typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union\n\nfrom absl import logging\nimport dataclasses\nimport numpy as np\nfrom packaging import version\nfrom t5.seqio import utils\nfrom t5.seqio.feature_converters import FeatureConverter\nfrom t5.seqio.vocabularies import Vocabulary\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nimport typing_extensions\n\n\n_DEFAULT_FEATURE_KEYS = [\"inputs\", \"targets\"]\n\n_VALID_TASK_NAME_REGEX = re.compile(r\"^[\\w\\d\\._]+$\")\n_MAX_EXAMPLES_TO_MEM_CACHE = 10000\nSHUFFLE_BUFFER_SIZE = 1000\n\n\[email protected](frozen=True)\nclass Feature:\n \"\"\"A container for attributes of output features of data providers.\"\"\"\n vocabulary: Vocabulary\n add_eos: bool = True\n required: bool = True\n dtype: tf.DType = tf.int32\n\n\[email protected](frozen=True)\nclass ShardInfo:\n \"\"\"A container for specifying sharding info.\"\"\"\n index: int\n num_shards: int\n\n\nclass DatasetProviderBase(metaclass=abc.ABCMeta):\n \"\"\"Abstract base for classes that provide a tf.data.Dataset.\"\"\"\n\n @abc.abstractproperty\n def output_features(self) -> Mapping[str, Feature]:\n raise NotImplementedError\n\n @abc.abstractproperty\n def splits(self) -> Sequence[str]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dataset(\n self,\n sequence_length: int,\n split: str,\n use_cached: bool = False,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None,\n num_epochs: int = 1\n ) -> tf.data.Dataset:\n \"\"\"Returns the requested tf.data.Dataset.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def num_input_examples(self, split: str) -> int:\n raise NotImplementedError\n\n\nclass DatasetProviderRegistry(object):\n \"\"\"Base for registry of data providers.\n\n Subclasses must wrap `get` method to override the return type for pytype.\n TODO(adarob): Remove the need to override `get`.\n \"\"\"\n # Class variables must be defined in subclasses.\n _REGISTRY: MutableMapping[str, DatasetProviderBase]\n _PROVIDER_TYPE: Type[DatasetProviderBase]\n\n @classmethod\n def add_provider(cls, name: str, provider):\n \"\"\"Adds a data provider instance to the registry.\"\"\"\n if name in cls._REGISTRY:\n raise ValueError(\"Attempting to register duplicate provider: %s\" % name)\n if not isinstance(provider, cls._PROVIDER_TYPE):\n raise ValueError(\n \"Attempting to register a class not of an invalid type. 
\"\n \"Expecting instance of %s, got %s\" %\n (cls._PROVIDER_TYPE, type(provider).__name__))\n\n cls._REGISTRY[name] = provider\n\n @classmethod\n def add(\n cls,\n name: str,\n provider_cls,\n *provider_args,\n **provider_kwargs\n ):\n \"\"\"Instantiates and adds provider to the registry.\"\"\"\n if not issubclass(provider_cls, cls._PROVIDER_TYPE):\n raise ValueError(\n \"Attempting to register a class not of an invalid type. \"\n \"Expecting instance of %s, got %s\" %\n (cls._PROVIDER_TYPE, provider_cls))\n provider = provider_cls(*provider_args, **provider_kwargs)\n cls.add_provider(name, provider)\n return provider\n\n @classmethod\n def remove(cls, name):\n \"\"\"Remove provider from the registry, if it exists.\"\"\"\n if name in cls._REGISTRY:\n del cls._REGISTRY[name]\n\n @classmethod\n def get(cls, name):\n \"\"\"Returns provider from the registry.\"\"\"\n if name not in cls._REGISTRY:\n raise ValueError(\"Provider name not registered: %s\" % name)\n return cls._REGISTRY[name]\n\n @classmethod\n def names(cls):\n \"\"\"Returns all provider names in registry.\"\"\"\n return cls._REGISTRY.keys()\n\n @classmethod\n def reset(cls):\n \"\"\"Removes all of the registered tasks.\"\"\"\n cls._REGISTRY = {}\n\n @classmethod\n def get_dataset(\n cls,\n name,\n sequence_length,\n split,\n use_cached=False,\n shuffle=True,\n seed=None,\n shard_info=None,\n num_epochs=1):\n \"\"\"Returns the requested tf.data.Dataset.\"\"\"\n return cls.get(name).get_dataset(\n sequence_length=sequence_length, split=split, use_cached=use_cached,\n shuffle=shuffle, seed=seed, shard_info=shard_info,\n num_epochs=num_epochs)\n\n\n# =============================== DataSources ==================================\n\n\nclass DataSource(DatasetProviderBase):\n \"\"\"A `DatasetProvider` that provides raw data from an input source.\n\n Inherits all abstract methods and properties of `DatasetProviderBase` except\n those overidden below.\n \"\"\"\n\n def __init__(\n self,\n splits: Iterable[str],\n num_input_examples: Optional[Mapping[str, int]] = None):\n self._splits = tuple(splits)\n self._num_input_examples = (\n dict(num_input_examples) if num_input_examples is not None else None)\n\n @property\n def splits(self) -> Sequence[str]:\n return self._splits\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n \"\"\"Override unused property of `DatasetProviderBase`.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def list_shards(self, split: str) -> Sequence[str]:\n \"\"\"Returns string identifiers of input shards.\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_dataset(\n self,\n split: str,\n shuffle: bool = True,\n seed: Optional[int] = None,\n shard_info: Optional[ShardInfo] = None\n ) -> tf.data.Dataset:\n \"\"\"Overrides base class to add shard identifier and remove use_cached.\n\n Args:\n split: string, the split to return.\n shuffle: bool, whether to shuffle the input source.\n seed: tf.int64 scalar tf.Tensor (or None) for shuffling input source.\n shard_info: optional specification for loading a shard of the split.\n \"\"\"\n raise NotImplementedError\n\n def num_input_examples(self, split: str) -> Optional[int]:\n if self._num_input_examples is None:\n return None\n return self._num_input_examples[split]\n\n\ndef _validate_args(fn, expected_pos_args):\n \"\"\"Ensure function has exactly expected positional args.\"\"\"\n argspec = inspect.getfullargspec(fn)\n expected_pos_args = tuple(expected_pos_args)\n actual_args = tuple(argspec.args)\n if 
actual_args[:len(expected_pos_args)] != expected_pos_args:\n
    raise ValueError(\n        \"'%s' must have positional args %s, got: %s\" % (\n            fn.__name__, expected_pos_args, actual_args))\n
  actual_pos_args = tuple(\n      argspec.args[:-len(argspec.defaults)]\n      if argspec.defaults else argspec.args)\n
  if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:\n
    raise ValueError(\n        \"'%s' may only have positional args %s, got: %s\" % (\n            fn.__name__, expected_pos_args, actual_pos_args))\n\n\n
class DatasetFnCallable(typing_extensions.Protocol):\n\n
  def __call__(self,\n               split: str,\n               shuffle_files: bool,\n               seed: Optional[int] = None) -> tf.data.Dataset:\n    ...\n\n\n
class FunctionDataSource(DataSource):\n
  \"\"\"A `DataSource` that uses a function to provide the input data.\"\"\"\n\n
  def __init__(\n      self,\n      dataset_fn: DatasetFnCallable,\n      splits: Iterable[str],\n      num_input_examples: Optional[Mapping[str, int]] = None\n  ):\n
    \"\"\"FunctionDataSource constructor.\n\n
    Args:\n      dataset_fn: a function with the signature `dataset_fn(split,\n        shuffle_files)` (and optionally the variable `seed`) that returns a\n        `tf.data.Dataset`.\n      splits: an iterable of applicable string split names.\n      num_input_examples: dict or None, an optional dictionary mapping split\n        to its size in number of input examples (before preprocessing). The\n        `num_input_examples` method will return None if not provided.\n    \"\"\"\n
    _validate_args(dataset_fn, [\"split\", \"shuffle_files\"])\n
    self._dataset_fn = dataset_fn\n
    super().__init__(splits=splits, num_input_examples=num_input_examples)\n\n
  def get_dataset(\n      self,\n      split: str,\n      shuffle: bool = True,\n      seed: Optional[int] = None,\n      shard_info: Optional[ShardInfo] = None\n  ) -> tf.data.Dataset:\n
    if shard_info and shard_info.num_shards > 1:\n
      raise ValueError(\n          \"`FunctionDataSource` does not support low-level sharding. Use \"\n          \"tf.data.Dataset.shard instead.\")\n\n
    if seed is None:\n      ds = self._dataset_fn(split=split, shuffle_files=shuffle)\n
    else:\n      _validate_args(self._dataset_fn, [\"split\", \"shuffle_files\", \"seed\"])\n
      ds = self._dataset_fn(split=split, shuffle_files=shuffle, seed=seed)\n
    return ds\n\n
  def list_shards(self, split: str) -> Sequence[str]:\n    return [split]\n
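\n\n# Illustrative FunctionDataSource usage (hypothetical function, not part of this\n
# module; shown for orientation only):\n
#\n
#   def my_dataset_fn(split, shuffle_files, seed=None):\n
#     return tf.data.Dataset.from_tensor_slices([\"example 1\", \"example 2\"])\n
#\n
#   source = FunctionDataSource(\n
#       dataset_fn=my_dataset_fn, splits=(\"train\", \"validation\"))\n
#   ds = source.get_dataset(\"train\", shuffle=True, seed=42)\n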
\n\nclass TfdsDataSource(DataSource):\n
  \"\"\"A `DataSource` that uses TensorFlow Datasets to provide the input data.\"\"\"\n\n
  def __init__(\n      self,\n      tfds_name: str,\n      tfds_data_dir: Optional[str] = None,\n      splits: Optional[Union[Iterable[str], Mapping[str, str]]] = None\n  ):\n
    \"\"\"TfdsTask constructor.\n\n
    Args:\n      tfds_name: string, the name and version number of a TFDS dataset,\n        optionally with a config.\n      tfds_data_dir: string, an optional path to a specific TFDS data directory\n        to use.\n      splits: an iterable of allowable string split names, a dict mapping\n        allowable canonical splits (e.g., 'validation') to TFDS splits or\n        slices (e.g., 'train[:1%]'), or None. The default, None, uses all\n        available splits from the TFDS dataset info.\n    \"\"\"\n
    if \":\" not in tfds_name:\n
      raise ValueError(\"TFDS name must contain a version number, got: %s\" %\n                       tfds_name)\n\n
    self._tfds_dataset = utils.LazyTfdsLoader(\n        tfds_name,\n        data_dir=tfds_data_dir,\n        split_map=splits if isinstance(splits, dict) else None)\n\n
    # If splits are not provided, we pass an empty tuple and use the lazy\n    # lookup in the `splits` property.\n
    super().__init__(splits=splits or ())\n\n
  @property\n  def splits(self):\n
    \"\"\"Overrides since we can't call `info.splits` until after init.\"\"\"\n
    return self._splits or self._tfds_dataset.info.splits\n\n
  @property\n  def tfds_dataset(self):\n    return self._tfds_dataset\n\n
  def get_dataset(\n      self,\n      split: str,\n      shuffle: bool = True,\n      seed: Optional[int] = None,\n      shard_info: Optional[ShardInfo] = None\n  ) -> tf.data.Dataset:\n
    return self.tfds_dataset.load(\n        split, shuffle_files=shuffle, seed=seed, shard_info=shard_info)\n\n
  def num_input_examples(self, split: str) -> int:\n
    \"\"\"Overrides since we can't call `info.splits` until after init.\"\"\"\n
    return self.tfds_dataset.size(split)\n\n
  def list_shards(self, split: str) -> Sequence[str]:\n
    return self.tfds_dataset.files(split)\n\n\n
class FileDataSource(DataSource):\n
  \"\"\"A `DataSource` that reads a file to provide the input dataset.\"\"\"\n\n
  def __init__(\n      self,\n      read_file_fn: Callable[[tf.data.Dataset], tf.data.Dataset],\n      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n      num_input_examples: Optional[Mapping[str, int]] = None,\n  ):\n
    \"\"\"FileDataSource constructor.\n\n
    Args:\n      read_file_fn: a callable for creating a `tf.data.Dataset` from a\n        `tf.data.Dataset` of file paths, e.g., `tf.data.TFRecordDataset`.\n      split_to_filepattern: a mapping from split names to filepatterns to be\n        expanded with glob.\n      num_input_examples: dict or None, an optional dictionary mapping split\n        to its size in number of input examples (before preprocessing). The\n        `num_input_examples` method will return None if not provided.\n    \"\"\"\n
    self._split_to_filepattern = split_to_filepattern\n
    self._reader = read_file_fn\n
    super().__init__(\n        splits=split_to_filepattern.keys(),\n        num_input_examples=num_input_examples)\n\n
  def get_dataset(\n      self,\n      split: str,\n      shuffle: bool = True,\n      seed: Optional[int] = None,\n      shard_info: Optional[ShardInfo] = None\n  ) -> tf.data.Dataset:\n
    files = self.list_shards(split)\n\n
    if not files:\n
      raise ValueError(\n          \"No file is found for the file pattern: \"\n          f\"{self._split_to_filepattern[split]}.\"\n      )\n
    files_ds = tf.data.Dataset.from_tensor_slices(np.array(files, dtype=str))\n\n
    if shard_info:\n
      if len(files) < shard_info.num_shards:\n
        raise ValueError(\n            f\"Dataset has too few files to shard. 
{len(files)} files vs \"\n f\"{shard_info.num_shards} shards requested.\")\n files_ds = files_ds.shard(shard_info.num_shards, shard_info.index)\n\n if shuffle:\n files_ds = files_ds.shuffle(buffer_size=16, seed=seed)\n\n return files_ds.interleave(\n self._reader,\n cycle_length=16,\n block_length=16,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def list_shards(self, split: str) -> Sequence[str]:\n return tf.io.gfile.glob(self._split_to_filepattern[split])\n\n\nclass TextLineDataSource(FileDataSource):\n \"\"\"A `FileDataSource` that reads lines of text from a file as input.\"\"\"\n\n def __init__(\n self,\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n skip_header_lines: int = 0,\n num_input_examples: Optional[Mapping[str, int]] = None,\n ):\n \"\"\"TextLineDataSource constructor.\n\n Args:\n split_to_filepattern: a mapping from split names to filepatterns to be\n expanded with glob.\n skip_header_lines: int, number of header lines to skip in each source\n file.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). The\n `num_input_examples` method will return None if not provided.\n \"\"\"\n # Used during caching.\n self._skip_header_lines = skip_header_lines\n\n def read_file_fn(filepattern):\n return tf.data.TextLineDataset(filepattern).skip(skip_header_lines)\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples=num_input_examples)\n\n\nclass TFExampleDataSource(FileDataSource):\n \"\"\"A `FileDataSource` that reads files of tf.train.Example protos as input.\"\"\"\n\n def __init__(\n self,\n split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],\n feature_description: Mapping[str, Union[tf.io.FixedLenFeature,\n tf.io.VarLenFeature]],\n reader_cls: Type[tf.data.Dataset] = tf.data.TFRecordDataset,\n num_input_examples: Optional[Mapping[str, int]] = None,\n ):\n \"\"\"TFExampleDataSource constructor.\n\n Args:\n split_to_filepattern: dict of string (split name) to either string\n (filename or filepattern) or list of strings (filenames or\n filepatterns).\n feature_description: dict, a mapping of string feature keys to\n `tf.io.FixedLenFeature` or `tf.io.VarLenFeature` values.\n reader_cls: `tf.data.Dataset`, a dataset class to read the input files.\n num_input_examples: dict or None, an optional dictionary mapping split to\n its size in number of input examples (before preprocessing). 
The\n `num_input_examples` method will return None if not provided.\n \"\"\"\n\n def read_file_fn(filepattern):\n return reader_cls(filepattern).map(\n lambda pb: tf.io.parse_single_example(pb, feature_description),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n super().__init__(\n read_file_fn=read_file_fn,\n split_to_filepattern=split_to_filepattern,\n num_input_examples=num_input_examples)\n\n\n# ========================== Offline Caching Helpers ===========================\n\n\ndef _rename_plaintext_to_pretokenized(\n dataset: tf.data.Dataset) -> tf.data.Dataset:\n \"\"\"Rename cached _plaintext features to new _pretokenized standard.\"\"\"\n def _rename(inputs):\n outputs = {}\n for k, v in inputs.items():\n if k.endswith(\"_plaintext\"):\n k = k[:-len(\"plaintext\")] + \"pretokenized\"\n outputs[k] = v\n return outputs\n return dataset.map(\n _rename, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\nclass _CachedDataSource(FileDataSource):\n \"\"\"A `FileDataSource` for reading datasets cached offline.\"\"\"\n\n def __init__(self, cache_dir: str, split: str):\n\n with tf.io.gfile.GFile(utils.get_cached_info_path(cache_dir, split)) as f:\n split_info = json.load(f)\n features = split_info[\"features\"]\n\n with tf.io.gfile.GFile(utils.get_cached_stats_path(cache_dir, split)) as f:\n stats = json.load(f)\n\n version_when_cached = version.Version(\n split_info.get(\"seqio_version\", \"0.pre\"))\n version_with_true_dtypes = version.Version(\"0.0.0\")\n if version_when_cached < version_with_true_dtypes:\n # Assume that all int64 features are really int32.\n for name, feat in features.items():\n if feat[\"dtype\"] == \"int64\":\n logging.info(\"Casting cached '%s' to int32.\", name)\n feat[\"dtype\"] = \"int32\"\n\n # Use `FixedLenSequenceFeature` for sequences with variable length.\n def _feature_config(shape, dtype):\n if dtype in (\"int32\", \"bool\"):\n # int32 and bool are stored as int64 in the tf.train.Example protobuf.\n # TODO(adarob): Support other conversions.\n dtype = \"int64\"\n if shape and shape[0] is None:\n return tf.io.FixedLenSequenceFeature(\n shape[1:], dtype, allow_missing=True)\n return tf.io.FixedLenFeature(shape, dtype)\n\n feature_description = {\n feat: _feature_config(**desc) for feat, desc in features.items()\n }\n\n def read_file_fn(filepattern):\n ds = tf.data.TFRecordDataset(filepattern)\n ds = ds.map(\n lambda pb: tf.io.parse_single_example(pb, feature_description),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Cast features back to the types from the info JSON since some features\n # must be cast for storage (e.g., in32 is stored as int64).\n ds = ds.map(\n lambda x: {k: tf.cast(v, features[k][\"dtype\"]) for k, v in x.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Legacy cached datasets may use old \"_plaintext\" suffix. 
Rename to\n      # \"_pretokenized\".\n
      ds = _rename_plaintext_to_pretokenized(ds)\n
      return ds\n\n
    split_to_filepattern = {\n        split: \"%s-*-of-*%d\" % (\n            utils.get_cached_tfrecord_prefix(cache_dir, split),\n            split_info[\"num_shards\"])\n    }\n\n
    super().__init__(\n        read_file_fn=read_file_fn,\n        split_to_filepattern=split_to_filepattern,\n        num_input_examples={split: stats[\"examples\"]}\n    )\n\n\n
class CacheDatasetPlaceholder(object):\n
  \"\"\"A placeholder to signal when in the pipeline offline caching will occur.\"\"\"\n\n
  def __init__(self, required=False):\n
    \"\"\"CacheDatasetPlaceholder constructor.\n\n
    Args:\n      required: whether the dataset must be accessed in its cached form, and\n        on-the-fly preprocessing is disallowed.\n    \"\"\"\n
    self._required = required\n\n
  @property\n  def required(self):\n    return self._required\n\n
  def __call__(self, dataset):\n
    raise RuntimeError(\"`CacheDatasetPlaceholder` should never be called.\")\n\n\n
# ================================ Tasks =======================================\n\n\n
MetricFnCallable = Callable[..., Mapping[str, float]]\n\n\n
class Task(DatasetProviderBase):\n
  \"\"\"A class to manage a dataset and its related metrics.\"\"\"\n\n
  def __init__(\n      self,\n      name: str,\n      source: DataSource,\n      output_features: Mapping[str, Feature],\n      preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,\n      postprocess_fn: Optional[Callable[..., Any]] = None,\n      metric_fns: Optional[Sequence[MetricFnCallable]] = None,\n      shuffle_buffer_size: Optional[int] = SHUFFLE_BUFFER_SIZE):\n
    \"\"\"Task constructor.\n\n
    Args:\n      name: a unique name for the Task.\n      source: a `DataSource` that provides a raw `tf.data.Dataset`.\n      output_features: dict(str, Feature), output features of the Task to be\n        passed to the model. After preprocessing, examples will be validated to\n        ensure they include features that match this specification. Note that\n        additional features may be included (e.g., for evaluation), but they\n        will not be passed to the model.\n      preprocessors: list(callable), an optional list of functions that receive\n        a tf.data.Dataset and return a tf.data.Dataset. These will be executed\n        sequentially and the final dataset must include features matching\n        `output_features`.\n      postprocess_fn: callable, an optional function that receives decoded model\n        outputs and converts them to a form that is ready for evaluation using\n        the metric functions in `metric_fns`.\n      metric_fns: list(callable), an optional list of metric functions with the\n        signature `metric_fn(targets, predictions)` to use during evaluation. If\n        undefined or empty, no evaluation will occur on the task.\n      shuffle_buffer_size: an optional integer to set the shuffle buffer size.\n        If None, shuffling will be disallowed.\n    \"\"\"\n
    if not _VALID_TASK_NAME_REGEX.match(name):\n
      raise ValueError(\n          \"Task name '%s' contains invalid characters. Must match regex: %s\" % (\n              name, _VALID_TASK_NAME_REGEX.pattern))\n\n
    metric_fns = metric_fns or []\n
    self._predict_metric_fns = []\n
    self._score_metric_fns = []\n
    for metric_fn in metric_fns:\n
      pos_args = tuple(\n          key for key, param in inspect.signature(metric_fn).parameters.items()\n          if param.default == inspect.Parameter.empty\n      )\n
      if pos_args == (\"targets\", \"scores\"):\n
        self._score_metric_fns.append(metric_fn)\n
      elif pos_args == (\"targets\", \"predictions\"):\n
        self._predict_metric_fns.append(metric_fn)\n
      else:\n
        raise ValueError(\n            \"Metric functions must have positional arguments matching either \"\n            \"('targets', 'predictions') or ('targets', 'scores'). \"\n            f\"Got: {pos_args}\")\n
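\n    # Illustrative metric signatures that pass the check above (hypothetical\n
    # names, for orientation only):\n
    #   def accuracy(targets, predictions): ...   -> predict_metric_fns\n
    #   def avg_logprob(targets, scores): ...     -> score_metric_fns\n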
\"\n f\"Got: {pos_args}\")\n\n self._name = name\n self._source = source\n\n # Find optional CacheDatasetPlaceholder.\n preprocessors = tuple(preprocessors or [])\n cache_step_idxs = [\n i for i, p in enumerate(preprocessors)\n if isinstance(p, CacheDatasetPlaceholder)\n ]\n if len(cache_step_idxs) > 1:\n raise ValueError(\n \"`CacheDatasetPlaceholder` can appear at most once in the \"\n f\"preprocessing pipeline. Found {len(cache_step_idxs)} in '{name}'.\")\n cache_step_idx = cache_step_idxs[0] if cache_step_idxs else None\n if cache_step_idx is not None:\n for prep in preprocessors[:cache_step_idx]:\n prep_args = inspect.signature(prep).parameters.keys()\n if \"sequence_length\" in prep_args:\n raise ValueError(\n f\"'{prep.__name__}' has a `sequence_length` argument but occurs \"\n f\"before `CacheDatasetPlaceholder` in '{name}'. This is not \"\n \"allowed since the sequence length is specified at run time.\")\n if \"seed\" in prep_args or \"seeds\" in prep_args:\n raise logging.warning( # pylint:disable=logging-format-interpolation\n f\"'{prep.__name__}' has a `seed(s)` argument but occurs before \"\n f\"`CacheDatasetPlaceholder` in '{name}'. This is not recommended \"\n \"since the same samples will be used each epoch when reading \"\n \"from the cache.\")\n self._cache_step_idx = cache_step_idx\n self._preprocessors = preprocessors\n\n self._metric_fns = tuple(metric_fns)\n self._postprocess_fn = postprocess_fn\n\n self._cache_dir = None\n self._stats = {}\n self._shuffle_buffer_size = shuffle_buffer_size\n\n self._output_features = collections.OrderedDict(\n sorted(list(output_features.items()))\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of all metric functions.\"\"\"\n return self._predict_metric_fns + self._score_metric_fns\n\n @property\n def score_metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of metric functions that use log likelihood scores.\"\"\"\n return self._score_metric_fns\n\n @property\n def predict_metric_fns(self) -> Sequence[MetricFnCallable]:\n \"\"\"List of metric functions that use model predictions.\"\"\"\n return self._predict_metric_fns\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n return self._output_features\n\n @property\n def splits(self) -> Sequence[str]:\n s = self.source.splits\n if not s:\n raise ValueError(f\"Task {self.name} has no splits\")\n return s\n\n @property\n def source(self) -> DataSource:\n return self._source\n\n @property\n def preprocessors(self) -> Sequence[Callable[..., tf.data.Dataset]]:\n return self._preprocessors\n\n def num_input_examples(self, split: str) -> Optional[int]:\n return self.source.num_input_examples(split)\n\n def _preprocess_dataset(\n self,\n dataset: tf.data.Dataset,\n preprocessors: Sequence[Callable[..., tf.data.Dataset]],\n sequence_length: Optional[Mapping[str, int]] = None) -> tf.data.Dataset:\n \"\"\"Sequentially applies preprocessors.\"\"\"\n for prep_fn in preprocessors:\n # prep_fn must not rely on variable length keyword args such as **kwargs.\n fn_args = set(inspect.signature(prep_fn).parameters.keys())\n kwargs = {}\n if \"sequence_length\" in fn_args:\n kwargs[\"sequence_length\"] = sequence_length\n if \"output_features\" in fn_args:\n kwargs[\"output_features\"] = self.output_features\n dataset = prep_fn(dataset, **kwargs)\n return dataset\n\n def _validate_preprocessing(\n self, dataset: tf.data.Dataset\n ) -> tf.data.Dataset:\n \"\"\"Validates preprocessed 
dataset, raising Exceptions if needed.\n\n Args:\n dataset: a tf.data.Dataset to validate.\n\n Returns:\n a validated tf.data.Dataset.\n \"\"\"\n actual_specs = dataset.element_spec\n for feat, feat_spec in self.output_features.items():\n if feat not in actual_specs:\n if feat_spec.required:\n raise ValueError(\n \"Task dataset is missing expected output feature after \"\n f\"preprocessing: {feat}\")\n else:\n # It's ok that this feature does not exist.\n continue\n actual_spec = actual_specs[feat]\n if feat_spec.dtype != actual_spec.dtype:\n raise ValueError(\n f\"Task dataset has incorrect type for feature '{feat}' after \"\n f\"preprocessing: Got {actual_spec.dtype.name}, expected \"\n f\"{feat_spec.dtype.name}\")\n if actual_spec.shape.rank != 1:\n raise ValueError(\n f\"Task dataset has incorrect rank for feature '{feat}' after \"\n f\"preprocessing: Got {actual_spec.shape.rank}, expected 1\")\n\n return dataset\n\n def _trim_output_features(\n self,\n dataset: tf.data.Dataset,\n sequence_length: Optional[Mapping[str, int]]\n ) -> tf.data.Dataset:\n \"\"\"Trim output features to sequence length.\"\"\"\n def _trim(k: str, v: tf.Tensor) -> tf.Tensor:\n if k not in self.output_features or not sequence_length:\n return v\n return v[:sequence_length[k]]\n\n return dataset.map(\n lambda ex: {k: _trim(k, v) for k, v in ex.items()},\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n def preprocess_precache(\n self,\n dataset: tf.data.Dataset,\n seed: Optional[int] = None\n ) -> tf.data.Dataset:\n \"\"\"Runs preprocessing steps before the optional CacheDatasetPlaceholder.\"\"\"\n if not self.supports_caching:\n return dataset\n\n with utils.map_seed_manager(seed):\n return self._preprocess_dataset(\n dataset,\n self._preprocessors[:self._cache_step_idx],\n )\n\n def preprocess_postcache(\n self,\n dataset: tf.data.Dataset,\n sequence_length: Optional[Mapping[str, int]],\n seed: Optional[int] = None\n ) -> tf.data.Dataset:\n \"\"\"Runs preprocessing steps after the optional CacheDatasetPlaceholder.\n\n Args:\n dataset: a tf.data.Dataset\n sequence_length: dict mapping feature key to int length for that feature.\n If None, the features will not be truncated.\n seed: an optional random seed for deterministic preprocessing.\n Returns:\n a tf.data.Dataset\n \"\"\"\n start_idx = 0\n if self.supports_caching:\n # Skip a sufficient number of seeds to avoid duplicating any from\n # pre-cache preprocessing.\n seed = None if seed is None else seed + 42 * self._cache_step_idx\n start_idx = self._cache_step_idx + 1\n with utils.map_seed_manager(seed):\n dataset = self._preprocess_dataset(\n dataset,\n self._preprocessors[start_idx:],\n sequence_length=sequence_length,\n )\n return dataset\n\n @property\n def cache_dir(self) -> Optional[str]:\n \"\"\"Returns the cache directory (or None), initializing if needed.\"\"\"\n if not self._cache_dir:\n # See if cached data exists in any of the cache directories.\n potential_cache_dirs = [\n os.path.join(d, self.name) for d in utils.get_global_cache_dirs()]\n for cache_dir in potential_cache_dirs:\n try:\n if tf.io.gfile.exists(os.path.join(cache_dir, \"COMPLETED\")):\n self._cache_dir = cache_dir\n logging.info(\"'%s' is cached at %s.\", self.name, self.cache_dir)\n break\n except tf.errors.PermissionDeniedError:\n logging.warning(\n \"Permission denied for global cache folder: %s\", cache_dir)\n\n if not self._cache_dir:\n logging.info(\n \"'%s' does not exist in any task cache directories (searched %s).\",\n self.name,\n potential_cache_dirs,\n )\n return 
self._cache_dir\n\n  @property\n  def supports_caching(self) -> bool:\n    \"\"\"Whether or not this task supports offline caching.\"\"\"\n    return self._cache_step_idx is not None\n\n  @property\n  def requires_caching(self) -> bool:\n    \"\"\"Whether or not this task requires offline caching.\"\"\"\n    return (self._cache_step_idx is not None and\n            self.preprocessors[self._cache_step_idx].required)\n\n  def assert_cached(self) -> None:\n    \"\"\"Raises an assertion error if cached dataset does not exist.\"\"\"\n    assert self.cache_dir, (\n        f\"'{self.name}' does not exist in any of the task cache directories.\")\n\n  def get_cached_stats(self,\n                       split: str = tfds.Split.TRAIN\n                      ) -> Mapping[str, Union[int, float]]:\n    \"\"\"Returns basic statistics for cached dataset.\"\"\"\n    self.assert_cached()\n    if split not in self._stats:\n      stats_path = utils.get_cached_stats_path(self.cache_dir, split)\n      if not tf.io.gfile.exists(stats_path):\n        raise ValueError(\n            \"Stats do not exist for '%s' split: %s\" % (self.name, split))\n      with tf.io.gfile.GFile(stats_path) as f:\n        self._stats[split] = json.load(f)\n    return self._stats[split]\n\n  def get_dataset(\n      self,\n      sequence_length: Optional[Mapping[str, int]],\n      split: str = tfds.Split.TRAIN,\n      use_cached: bool = False,\n      shuffle: bool = True,\n      shuffle_buffer_size: Optional[int] = None,\n      seed: Optional[int] = None,\n      shard_info: Optional[ShardInfo] = None,\n      num_epochs: Optional[int] = 1\n  ) -> tf.data.Dataset:\n    \"\"\"Returns a tf.data.Dataset from cache or generated on the fly.\n\n    Args:\n      sequence_length: dict mapping feature key to maximum int length for that\n        feature. If longer after preprocessing, the feature will be truncated.\n        May be set to None to avoid truncation.\n      split: string, the split to return.\n      use_cached: bool, whether to use the cached dataset instead of processing\n        it on the fly. Defaults to False.\n      shuffle: bool, whether to shuffle the dataset. Only used when generating\n        on the fly (use_cached=False).\n      shuffle_buffer_size: an integer or None to use task-specific buffer size.\n      seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.\n      shard_info: optional specification for loading a shard of the split. If\n        the Task's DataSource contains at least the number of shards in the\n        specification, it will be passed the shard info to avoid loading the\n        full source dataset. Otherwise, the full source dataset will be loaded\n        and sharded at the individual example level.\n      num_epochs: the number of times to iterate through the dataset, or `None`\n        to repeat indefinitely. Note that the repeat occurs in the pipeline\n        after offline caching, but before applying potentially stochastic\n        post-cache preprocessors and is therefore typically preferred to calling\n        `repeat()` on the returned dataset. Defaults to `1`.\n    Returns:\n      A tf.data.Dataset.\n    \"\"\"\n    if use_cached and not self.supports_caching:\n      logging.warning(\n          \"Task '%s' does not support caching. 
Switching to on-the-fly \"\n \"preprocessing.\", self.name)\n use_cached = False\n elif self.requires_caching and not use_cached:\n raise ValueError(\n f\"Task '{self.name}' requires caching, but was called with \"\n \"`use_cached=False`.\")\n\n if shard_info:\n # Whether we should shard at source or on the examples from the source.\n shard_data_source = (\n len(self.source.list_shards(split=split)) >= shard_info.num_shards)\n logging.info(\"Sharding at the %s: %d of %d\",\n \"data source\" if shard_data_source else \"examples\",\n shard_info.index, shard_info.num_shards)\n else:\n # No sharding.\n shard_data_source = False\n shard_info = ShardInfo(0, 1)\n\n if use_cached:\n source = self._get_cached_source(split)\n else:\n source = self.source\n\n if shard_data_source:\n ds = source.get_dataset(\n split=split, shuffle=shuffle, seed=seed, shard_info=shard_info)\n else:\n ds = source.get_dataset(split=split, shuffle=shuffle, seed=seed)\n ds = ds.shard(shard_info.num_shards, shard_info.index)\n\n if ((use_cached and\n self.get_cached_stats(split)[\"examples\"] < _MAX_EXAMPLES_TO_MEM_CACHE)\n or (self.num_input_examples(split) and\n self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE)):\n logging.info(\n \"Automatically caching small dataset in memory: '%s:%s'\",\n self.name, split)\n ds = ds.cache()\n\n if not use_cached:\n ds = self.preprocess_precache(ds, seed=seed)\n\n ds = ds.prefetch(tf.data.experimental.AUTOTUNE)\n\n # We repeat before calling any (potentially) stochastic post-cache\n # preprocessing in order to take new samples each epoch.\n ds = ds.repeat(num_epochs)\n\n # Post cache processing.\n ds = self.preprocess_postcache(\n ds, sequence_length=sequence_length, seed=seed)\n ds = self._validate_preprocessing(ds)\n ds = self._trim_output_features(ds, sequence_length=sequence_length)\n\n if shuffle:\n if self._shuffle_buffer_size is None:\n raise ValueError(\n f\"Shuffling is disallowed for Task '{self.name}' since its \"\n \"`shuffle_buffer_size` was set to `None` on construction.\")\n shuffle_buffer_size = shuffle_buffer_size or self._shuffle_buffer_size\n # Shuffle before mixing since preprocessor can output multiple\n # (correlated) examples per input.\n ds = ds.shuffle(shuffle_buffer_size, seed=seed)\n\n return ds.prefetch(tf.data.experimental.AUTOTUNE)\n\n def _get_cached_source(self, split) -> _CachedDataSource:\n \"\"\"Returns a DataSource to read cached files for split.\"\"\"\n self.assert_cached()\n return _CachedDataSource(self.cache_dir, split)\n\n def postprocess_fn(self, decoded_model_output: Any,\n **postprocess_kwargs) -> Any:\n \"\"\"Returns the model output after applying the postprocess function.\"\"\"\n if self._postprocess_fn:\n return self._postprocess_fn(decoded_model_output, **postprocess_kwargs)\n return decoded_model_output\n\n\nclass TaskRegistry(DatasetProviderRegistry):\n \"\"\"Registry of Tasks.\"\"\"\n _REGISTRY = {}\n _PROVIDER_TYPE = Task\n\n @classmethod\n def add(\n cls,\n name: str,\n source: DataSource,\n output_features: Mapping[str, Feature],\n preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,\n postprocess_fn: Optional[Callable[..., Any]] = None,\n metric_fns: Optional[Sequence[Callable[..., Mapping[str, float]]]] = None,\n **kwargs) -> Task:\n return super().add(name, Task, name, source, output_features, preprocessors,\n postprocess_fn, metric_fns, **kwargs)\n\n @classmethod\n def get(cls, name) -> Task:\n return super().get(name)\n\n\n# ================================ Mixtures 
====================================\nclass Mixture(DatasetProviderBase):\n \"\"\"Class for mixing multiple tasks.\"\"\"\n\n def __init__(self,\n name: str,\n tasks: Union[Sequence[str],\n Sequence[Tuple[str, Union[int, float,\n Callable[[Task],\n float]]]]],\n default_rate: Union[float, Callable[[Task], float]] = None):\n \"\"\"Mixture constructor.\n\n A mixture specifies a set of tasks with associated mixing rates.\n\n Mixing happens on preprocessed tokenized examples.\n\n The mixing rates represent relative numbers of examples to use from their\n associated tasks. Setting the mixing rates to be equal to the numbers of\n examples in the tasks will result in each task going through an epoch in\n about the same amount of time - i.e. all examples are sampled equally across\n all tasks.\n\n Rates can be expressed either as absolute numbers or as functions that\n receive the Task as an argument.\n\n Args:\n name: string, a unique name for the Mixture.\n tasks: a list where each element is either a string (task name) or a\n pair whose first element is the task name and whose second element\n is either a float (rate) or a function from Task to float.\n default_rate: a float or a function from Task to float. This specifies the\n default rate if rates are not provided in the `tasks` argument.\n \"\"\"\n self._task_to_rate = {}\n self._tasks = []\n self._sub_mixtures = []\n self._name = name\n for t in tasks:\n if isinstance(t, str):\n task_name = t\n rate = default_rate\n if default_rate is None:\n raise ValueError(\"need a rate for each task\")\n else:\n task_name, rate = t\n\n if task_name in TaskRegistry.names():\n self._tasks.append(TaskRegistry.get(task_name))\n self._task_to_rate[task_name] = rate\n else:\n self._sub_mixtures.append(MixtureRegistry.get(task_name)) # pytype:disable=name-error\n self._task_to_rate[task_name] = rate\n\n if len(set(tuple(t.output_features) for t in self.tasks)) != 1:\n raise ValueError(\n \"All Tasks in a Mixture must have the same output features.\"\n )\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def tasks(self) -> Sequence[Task]:\n sub_tasks = (mix.tasks for mix in self._sub_mixtures)\n return list(sorted(set(sum(sub_tasks, self._tasks)), key=lambda t: t.name))\n\n @property\n def total_rate(self) -> float:\n return sum(float(rate(TaskRegistry.get(name)) if callable(rate) else rate)\n for name, rate in self._task_to_rate.items())\n\n def get_rate(self, task: Task) -> float:\n \"\"\"Computes the mixing rate for the given task.\"\"\"\n value = 0.0\n\n for mix in self._sub_mixtures:\n if task in mix.tasks:\n rate = self._task_to_rate[mix.name]\n value += rate * mix.get_rate(task) / mix.total_rate\n\n if task.name in self._task_to_rate:\n rate = self._task_to_rate[task.name]\n value += float(rate(task) if callable(rate) else rate)\n\n return value\n\n def num_input_examples(self, split: str) -> int:\n return sum(t.num_input_examples(split) for t in self.tasks)\n\n @property\n def splits(self) -> Sequence[str]:\n splits = set()\n for task in self.tasks:\n splits.update(task.splits)\n return tuple(splits)\n\n @property\n def output_features(self) -> Mapping[str, Feature]:\n # We require all tasks to have the same output_features in __init__\n # so we can just get the output_features for the 0th task\n return self.tasks[0].output_features\n\n def _check_compatible_features(self) -> None:\n \"\"\"Throw Exception if features across tasks have different vocabs or dtypes.\n \"\"\"\n for name, feature in 
self.tasks[0].output_features.items():\n      for task in self.tasks[1:]:\n        if task.output_features[name].vocabulary != feature.vocabulary:\n          raise ValueError(\n              \"Features across tasks in a mixture must use the same vocabulary.\"\n          )\n        if task.output_features[name].dtype != feature.dtype:\n          raise ValueError(\n              \"Features across tasks in a mixture must use the same dtype.\"\n          )\n\n  def get_dataset(\n      self,\n      sequence_length: Optional[Mapping[str, int]],\n      split: str = tfds.Split.TRAIN,\n      use_cached: bool = False,\n      shuffle: bool = True,\n      seed: Optional[int] = None,\n      shard_info: Optional[ShardInfo] = None,\n      num_epochs: Optional[int] = None,\n      copy_pretokenized: bool = False,\n      compute_stats_empirically: bool = False,\n  ) -> tf.data.Dataset:\n    \"\"\"Returns the dataset of mixed tasks using the object-specified rates.\n\n    Args:\n      sequence_length: dict mapping feature key to maximum int length for that\n        feature. If longer after preprocessing, the feature will be truncated.\n        May be set to None to avoid truncation.\n      split: string, the split to return for all tasks.\n      use_cached: bool, whether to use the cached dataset instead of processing\n        it on the fly. Defaults to False.\n      shuffle: bool, whether to shuffle the dataset. Only used when generating\n        on the fly (use_cached=False).\n      seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.\n      shard_info: optional specification for loading a shard of the split.\n      num_epochs: the number of times to iterate through the dataset, or `None`\n        to repeat indefinitely. Note that the repeat occurs in the pipeline\n        after offline caching, but before applying potentially stochastic\n        post-cache preprocessors and is therefore typically preferred to calling\n        `repeat()` on the returned dataset. Defaults to `None`.\n      copy_pretokenized: bool, whether to pass through copies of pretokenized\n        features with a \"_pretokenized\" suffix added to the key.\n      compute_stats_empirically: a boolean - does not work on TPU\n    \"\"\"\n    self._check_compatible_features()\n    tasks = []\n    for task in self.tasks:\n      if split not in task.splits:\n        logging.warning(\n            \"Task %s has no '%s' split, skipping.\", task.name, split\n        )\n        continue\n      tasks.append(task)\n    if not tasks:\n      raise ValueError(\"No datasets have a '{}' split\".format(split))\n\n    output_feature_keys = set(self.output_features.keys())\n    if copy_pretokenized:\n      output_feature_keys.update(\n          {f + \"_pretokenized\" for f in output_feature_keys})\n\n    def filter_features(ex):\n      return {k: v for k, v in ex.items() if k in output_feature_keys}\n    datasets = [\n        task.get_dataset( # pylint:disable=g-complex-comprehension\n            sequence_length,\n            split=split,\n            use_cached=use_cached,\n            shuffle=shuffle,\n            seed=seed,\n            shard_info=shard_info,\n            num_epochs=num_epochs)\n        .map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n        for task in tasks]\n    rates = [self.get_rate(task) for task in tasks]\n    # Sample from the datasets with the given rates.\n    if seed is not None:\n      sample_seed = seed\n    elif shuffle:\n      sample_seed = None\n    else:\n      sample_seed = 42\n    dataset = tf.data.experimental.sample_from_datasets(\n        datasets, rates, sample_seed)\n    if (split == \"train\" and use_cached and\n        all(t.supports_caching for t in tasks)):\n      _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,\n                              compute_stats_empirically)\n    return dataset\n\n\ndef _log_padding_fractions(dataset, sequence_length, num_examples=100):\n  \"\"\"Empirically compute the fraction of padding - log the results.\n\n  Args:\n    dataset: a 
tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n num_examples: an integer\n \"\"\"\n logging.info(\"computing padding fractions\")\n keys = sequence_length.keys()\n padding_frac = {k: 0 for k in keys}\n for ex in tfds.as_numpy(dataset.take(num_examples)):\n for k in keys:\n padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))\n for k in keys:\n logging.info(\"%s padding fraction = %g\", k, padding_frac[k])\n\n\ndef _log_mixing_proportions(\n tasks, datasets, rates, mixed_dataset,\n sequence_length, compute_stats_empirically):\n \"\"\"Log information about the mixing proportions.\n\n Called from Mixture.get_dataset.\n\n Args:\n tasks: a list of Task\n datasets: a list of tf.data.Dataset\n rates: a list of floats\n mixed_dataset: a tf.data.Dataset\n sequence_length: dict from string to int (packed lengths)\n compute_stats_empirically: a boolean - does not work on TPU\n \"\"\"\n def _normalize(l):\n denom = sum(l)\n if not denom:\n return l\n return [x / denom for x in l]\n # compute some stats about the mixture\n examples_fraction = _normalize(rates)\n if compute_stats_empirically:\n stats_examples = 100\n mean_inputs_length = []\n mean_targets_length = []\n for dataset in datasets:\n inputs_sum = 0\n targets_sum = 0\n for ex in tfds.as_numpy(dataset.take(stats_examples)):\n # Some tasks, like LMs, don't have inputs.\n if \"inputs\" in ex:\n inputs_sum += ex[\"inputs\"].size\n targets_sum += ex[\"targets\"].size\n mean_inputs_length.append(inputs_sum / float(stats_examples))\n mean_targets_length.append(targets_sum / float(stats_examples))\n else:\n def _estimated_mean_length(task, key):\n if key not in sequence_length:\n return 0\n if (task.supports_caching and\n task._cache_step_idx < len(task._preprocessors) - 1): # pylint:disable=protected-access\n # There is processing after caching, so we can't rely on the stats.\n return sequence_length[key]\n # Some tasks, like LMs, don't have inputs.\n if key + \"_tokens\" in task.get_cached_stats(\"train\"):\n return min(sequence_length[key],\n (task.get_cached_stats(\"train\")[key + \"_tokens\"] /\n task.get_cached_stats(\"train\")[\"examples\"]))\n else:\n return 0\n\n mean_inputs_length = [_estimated_mean_length(task, \"inputs\")\n for task in tasks]\n mean_targets_length = [_estimated_mean_length(task, \"targets\")\n for task in tasks]\n inputs_fraction = _normalize(\n [l * r for l, r in zip(mean_inputs_length, rates)])\n targets_fraction = _normalize(\n [l * r for l, r in zip(mean_targets_length, rates)])\n logging.info(\"%12s %12s %12s %12s %12s %12s %s\",\n \"rate\", \"ex.frac.\", \"inp.frac.\", \"tgt.frac.\",\n \"inp.len.\", \"tgt.len\", \"task\")\n for i in range(len(rates)):\n logging.info(\"%12g %12g %12g %12g %12g %12g %s\",\n rates[i], examples_fraction[i],\n inputs_fraction[i], targets_fraction[i],\n mean_inputs_length[i], mean_targets_length[i],\n tasks[i].name)\n if compute_stats_empirically:\n _log_padding_fractions(mixed_dataset, sequence_length)\n\n\nclass MixtureRegistry(DatasetProviderRegistry):\n \"\"\"Registry of Mixtures.\"\"\"\n _REGISTRY = {}\n _PROVIDER_TYPE = Mixture\n\n @classmethod\n def add(cls, name, tasks, default_rate=None) -> Mixture:\n return super().add(name, Mixture, name, tasks, default_rate)\n\n @classmethod\n def get(cls, name) -> Mixture:\n return super().get(name)\n\n\ndef get_mixture_or_task(task_or_mixture_name):\n \"\"\"Return the Task or Mixture from the appropriate registry.\"\"\"\n mixtures = MixtureRegistry.names()\n tasks = TaskRegistry.names()\n if 
task_or_mixture_name in mixtures:\n    if task_or_mixture_name in tasks:\n      logging.warning(\"%s is both a Task and a Mixture, returning Mixture\",\n                      task_or_mixture_name)\n    return MixtureRegistry.get(task_or_mixture_name)\n  if task_or_mixture_name in tasks:\n    return TaskRegistry.get(task_or_mixture_name)\n  else:\n    raise ValueError(\"No Task or Mixture found with name: %s\" %\n                     task_or_mixture_name)\n\n\ndef get_subtasks(task_or_mixture):\n  \"\"\"Returns all the Tasks in a Mixture as a list or the Task itself.\"\"\"\n  if isinstance(task_or_mixture, Task):\n    return [task_or_mixture]\n  else:\n    return task_or_mixture.tasks\n\n\ndef get_dataset(\n    mixture_or_task_name: str,\n    task_feature_lengths: Mapping[str, int],\n    feature_converter: FeatureConverter,\n    dataset_split: str = \"train\",\n    use_cached: bool = False,\n    shuffle: bool = False,\n    num_epochs: Optional[int] = 1,\n    shard_info: ShardInfo = None,\n    verbose: bool = True,\n    seed: Optional[int] = None\n) -> tf.data.Dataset:\n  \"\"\"Get processed dataset with the model features.\n\n  In order to use options specific to a feature converter, e.g., packing, the\n  `feature_converter` instance should be instantiated with those options before\n  being passed to this function.\n\n  Getting sharded datasets is supported. To use this feature, pass in\n  `shard_info`, with shard_index and num_shards information. Sharding is done\n  before the feature converter stage. Therefore, if packing is used it will be\n  done on the sharded dataset.\n\n  Args:\n    mixture_or_task_name: mixture or task name for the Task API.\n    task_feature_lengths: dict mapping task feature key to its sequence length.\n      This specifies the sequence length of the dataset from the Task API.\n    feature_converter: a feature converter object to use to convert the task\n      features to model features.\n      Must be a subclass of FeatureConverter.\n    dataset_split: the split to use.\n    use_cached: whether to use the cached dataset instead of processing it on\n      the fly.\n    shuffle: whether to shuffle the dataset.\n    num_epochs: the number of times to iterate through the dataset, or `None` to\n      repeat indefinitely. Note that the repeat occurs in the pipeline after\n      offline caching, but before applying potentially stochastic post-cache\n      preprocessors and is therefore typically preferred to calling `repeat()`\n      on the returned dataset. Defaults to `1`.\n    shard_info: number of shards and shard index information.\n    verbose: if true, log the feature shapes.\n    seed: a random seed for shuffling tf.data.\n\n  Returns:\n    ds: the processed dataset.\n  \"\"\"\n  if not isinstance(feature_converter, FeatureConverter):\n    raise TypeError(\n        \"feature_converter should be an instance of FeatureConverter.\")\n\n  mixture_or_task = get_mixture_or_task(mixture_or_task_name)\n\n  ds = mixture_or_task.get_dataset(\n      task_feature_lengths,\n      split=dataset_split,\n      use_cached=use_cached,\n      shuffle=shuffle,\n      seed=seed,\n      shard_info=shard_info,\n      num_epochs=num_epochs)\n\n  ds = feature_converter(ds, task_feature_lengths=task_feature_lengths)\n\n  if verbose:\n    logging.info(\n        \"The output dataset from seqio.get_dataset has the following features\")\n    for feature_name, tensor_spec in ds.element_spec.items():\n      logging.info(\"feature: %s \\t shape: %s \\t dtype: %s\", feature_name,\n                   tensor_spec.shape.as_list(), tensor_spec.dtype.name)\n  return ds\n"
] | [
[
"tensorflow.compat.v2.data.TFRecordDataset",
"tensorflow.compat.v2.io.parse_single_example",
"tensorflow.compat.v2.io.FixedLenSequenceFeature",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.data.experimental.sample_from_datasets",
"tensorflow.compat.v2.io.gfile.glob",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.data.TextLineDataset",
"numpy.array",
"tensorflow.compat.v2.io.gfile.exists",
"tensorflow.compat.v2.io.FixedLenFeature"
]
] |
bmalezieux/unrolled_dl | [
"5854a6991e44db025a99a9f0d38be6b1e669aa83"
] | [
"experiments_approximate/experiments/create_dico_alphacsc.py"
] | [
"import numpy as np\n\natoms_to_save = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 15, 18])\n\nu_cdl = np.load(\"u_cdl.npy\")\nv_cdl = np.load(\"v_cdl.npy\")\n\nnp.save(\"u_cdl_modified.npy\", u_cdl[atoms_to_save])\nnp.save(\"v_cdl_modified.npy\", v_cdl[atoms_to_save])\n"
] | [
[
"numpy.array",
"numpy.load",
"numpy.save"
]
] |
KevHg/reddit-sentiment | [
"383407105957b8a582a524fa29b9f21d7b2cbd23"
] | [
"main.py"
] | [
"import os\nfrom scrapy.crawler import CrawlerProcess\nimport pandas as pd\nimport logging\nimport nltk\n\nimport json_reader\nfrom sentiment_score import clean_text, calculate_sentiment_score\nfrom reddit_scraper.reddit_scraper.spiders.reddit_post_scraper import RedditPostCrawler\n\nif __name__ == '__main__':\n # Initial setup: Disable scrapy logs and download NLTK files\n logging.getLogger('scrapy').propagate = False\n nltk.download('averaged_perceptron_tagger', quiet=True)\n nltk.download('wordnet', quiet=True)\n\n # Ask for user query\n subreddit = input('Subreddit: ')\n term = input('Search term: ')\n term = term.replace(' ', '+')\n\n # Start crawler process\n print('[LOG] Crawling Reddit, this will take a little time...')\n process = CrawlerProcess(settings={\n 'FEED_FORMAT': 'jl',\n 'FEED_URI': 'data.jl'\n })\n process.crawl(RedditPostCrawler,\n domain=f'https://old.reddit.com/r/{subreddit}/search?q={term}&restrict_sr=on&sort=relevance&t=all')\n process.start()\n\n # Convert data file to class\n print('[LOG] Creating DataFrame table...')\n reddit_posts = json_reader.convert_json('data.jl')\n all_comments = []\n all_upvotes = []\n for post in reddit_posts:\n for comment in post.comments:\n all_comments.append(clean_text(comment.text))\n\n # Convert upvote text to float, e.g. '15.3k upvotes' -> 15300\n upvote = comment.upvotes.split(' ')[0]\n if 'k' in upvote:\n upvote = upvote[:-1]\n upvote = float(upvote) * 1000\n all_upvotes.append(float(upvote))\n\n df = pd.DataFrame({'comment': all_comments, 'upvotes': all_upvotes})\n df = df[df.upvotes >= 1]\n\n print('[LOG] Calculating sentiment score, this may take a longer time...')\n df = calculate_sentiment_score(df)\n\n # df.to_csv('results.csv')\n normalized_result = df.sentiment.mean()\n\n print('[LOG] Completed!\\n')\n print('Average sentiment:', normalized_result)\n print('where +1 is most positive and -1 is most negative')\n\n os.remove('data.jl')\n"
] | [
[
"pandas.DataFrame"
]
] |