repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
zengxinzhy/colorization-pytorch
[ "a41e61dfc0d99532728af3cbfd21efbdaf6086c5" ]
[ "util/visualizer.py" ]
[ "import numpy as np\nimport os\nimport ntpath\nimport time\nfrom . import util\nfrom . import html\nfrom PIL import Image\nfrom torchvision import transforms\n\n\ndef imresize(image, size, interp=Image.BICUBIC):\n return transforms.Resize(size=size, interpolation=interp)(image)\n\n# save image to the disk\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n h, w, _ = im.shape\n if aspect_ratio > 1.0:\n im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')\n if aspect_ratio < 1.0:\n im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')\n util.save_image(im, save_path)\n\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\nclass Visualizer():\n def __init__(self, opt):\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.opt = opt\n self.saved = False\n if self.display_id > 0:\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(\n server=opt.display_server, port=opt.display_port)\n\n if self.use_html:\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' % self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n self.log_name = os.path.join(\n opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write(\n '================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n self.saved = False\n\n # |visuals|: dictionary of images to display or save\n def display_current_results(self, visuals, epoch, save_result):\n if self.display_id > 0: # show images in the browser\n ncols = self.ncols\n if ncols > 0:\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}\n table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h)\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(\n image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n # pane col = image row\n self.vis.images(images, nrow=ncols, win=self.display_id + 1,\n padding=2, opts=dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n else:\n idx = 1\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n 
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n win=self.display_id + idx)\n idx += 1\n\n # save images to a html file\n if self.use_html and (save_result or not self.saved):\n self.saved = True\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = os.path.join(\n self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n # update website\n webpage = html.HTML(\n self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, image_numpy in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n # losses: dictionary of error labels and values\n def plot_current_losses(self, epoch, counter_ratio, opt, losses):\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k]\n for k in self.plot_data['legend']])\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] *\n len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, i, losses, t, t_data):\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (\n epoch, i, t, t_data)\n for k, v in losses.items():\n message += '%s: %.3f, ' % (k, v)\n\n print(message)\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n" ]
[ [ "numpy.array" ] ]
carocamargo/pygmt
[ "6139c1735cff7f7d615d243145c21b1efef3f2c6" ]
[ "pygmt/helpers/testing.py" ]
[ "\"\"\"\nHelper functions for testing.\n\"\"\"\nimport inspect\nimport os\nimport string\n\nfrom matplotlib.testing.compare import compare_images\nfrom ..exceptions import GMTImageComparisonFailure\n\n\ndef check_figures_equal(*, extensions=(\"png\",), tol=0.0, result_dir=\"result_images\"):\n \"\"\"\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must return two arguments, *fig_ref* and *fig_test*,\n these two figures will then be saved and compared against each other.\n\n This decorator is practically identical to matplotlib's check_figures_equal\n function, but adapted for PyGMT figures. See also the original code at\n https://matplotlib.org/3.3.1/api/testing_api.html#\n matplotlib.testing.decorators.check_figures_equal\n\n Parameters\n ----------\n extensions : list\n The extensions to test. Default is [\"png\"].\n tol : float\n The RMS threshold above which the test is considered failed.\n result_dir : str\n The directory where the figures will be stored.\n\n Examples\n --------\n\n >>> import pytest\n >>> import shutil\n >>> from pygmt import Figure\n\n >>> @check_figures_equal(result_dir=\"tmp_result_images\")\n ... def test_check_figures_equal():\n ... fig_ref = Figure()\n ... fig_ref.basemap(projection=\"X5c\", region=[0, 5, 0, 5], frame=True)\n ... fig_test = Figure()\n ... fig_test.basemap(projection=\"X5c\", region=[0, 5, 0, 5], frame=\"af\")\n ... return fig_ref, fig_test\n >>> test_check_figures_equal()\n >>> assert len(os.listdir(\"tmp_result_images\")) == 0\n >>> shutil.rmtree(path=\"tmp_result_images\") # cleanup folder if tests pass\n\n >>> @check_figures_equal(result_dir=\"tmp_result_images\")\n ... def test_check_figures_unequal():\n ... fig_ref = Figure()\n ... fig_ref.basemap(projection=\"X5c\", region=[0, 5, 0, 5], frame=True)\n ... fig_test = Figure()\n ... fig_test.basemap(projection=\"X5c\", region=[0, 3, 0, 3], frame=True)\n ... return fig_ref, fig_test\n >>> with pytest.raises(GMTImageComparisonFailure):\n ... test_check_figures_unequal()\n >>> for suffix in [\"\", \"-expected\", \"-failed-diff\"]:\n ... assert os.path.exists(\n ... os.path.join(\n ... \"tmp_result_images\",\n ... f\"test_check_figures_unequal{suffix}.png\",\n ... )\n ... 
)\n >>> shutil.rmtree(path=\"tmp_result_images\") # cleanup folder if tests pass\n \"\"\"\n # pylint: disable=invalid-name\n ALLOWED_CHARS = set(string.digits + string.ascii_letters + \"_-[]()\")\n KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY\n\n def decorator(func):\n import pytest\n\n os.makedirs(result_dir, exist_ok=True)\n old_sig = inspect.signature(func)\n\n @pytest.mark.parametrize(\"ext\", extensions)\n def wrapper(*args, ext=\"png\", request=None, **kwargs):\n if \"ext\" in old_sig.parameters:\n kwargs[\"ext\"] = ext\n if \"request\" in old_sig.parameters:\n kwargs[\"request\"] = request\n try:\n file_name = \"\".join(c for c in request.node.name if c in ALLOWED_CHARS)\n except AttributeError: # 'NoneType' object has no attribute 'node'\n file_name = func.__name__\n try:\n fig_ref, fig_test = func(*args, **kwargs)\n ref_image_path = os.path.join(result_dir, f\"{file_name}-expected.{ext}\")\n test_image_path = os.path.join(result_dir, f\"{file_name}.{ext}\")\n fig_ref.savefig(ref_image_path)\n fig_test.savefig(test_image_path)\n\n # Code below is adapted for PyGMT, and is originally based on\n # matplotlib.testing.decorators._raise_on_image_difference\n err = compare_images(\n expected=ref_image_path,\n actual=test_image_path,\n tol=tol,\n in_decorator=True,\n )\n if err is None: # Images are the same\n os.remove(ref_image_path)\n os.remove(test_image_path)\n else: # Images are not the same\n for key in [\"actual\", \"expected\", \"diff\"]:\n err[key] = os.path.relpath(err[key])\n raise GMTImageComparisonFailure(\n \"images not close (RMS %(rms).3f):\\n\\t%(actual)s\\n\\t%(expected)s \"\n % err\n )\n finally:\n del fig_ref\n del fig_test\n\n parameters = [\n param\n for param in old_sig.parameters.values()\n if param.name not in {\"fig_test\", \"fig_ref\"}\n ]\n if \"ext\" not in old_sig.parameters:\n parameters += [inspect.Parameter(\"ext\", KEYWORD_ONLY)]\n if \"request\" not in old_sig.parameters:\n parameters += [inspect.Parameter(\"request\", KEYWORD_ONLY)]\n new_sig = old_sig.replace(parameters=parameters)\n wrapper.__signature__ = new_sig\n\n # reach a bit into pytest internals to hoist the marks from\n # our wrapped function\n new_marks = getattr(func, \"pytestmark\", []) + wrapper.pytestmark\n wrapper.pytestmark = new_marks\n\n return wrapper\n\n return decorator\n" ]
[ [ "matplotlib.testing.compare.compare_images" ] ]
banasraf/DALI
[ "f834f3e619b15e3df87bf0316ac8806d0998126e" ]
[ "dali/test/python/test_operator_warp.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\nimport nvidia.dali as dali\nfrom nvidia.dali.backend_impl import TensorListGPU\nimport numpy as np\nimport math\nfrom numpy.testing import assert_array_equal, assert_allclose\nimport os\nimport cv2\nfrom test_utils import check_batch\nfrom test_utils import compare_pipelines\nfrom test_utils import RandomDataIterator\nimport random\n\ntest_data_root = os.environ['DALI_EXTRA_PATH']\ncaffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')\n\ndef gen_transform(angle, zoom, dst_cx, dst_cy, src_cx, src_cy):\n t1 = np.array([[1, 0, -dst_cx], [0, 1, -dst_cy], [0, 0, 1]])\n cosa = math.cos(angle)/zoom\n sina = math.sin(angle)/zoom\n r = np.array([\n [cosa, -sina, 0],\n [sina, cosa, 0],\n [0, 0, 1]])\n t2 = np.array([[1, 0, src_cx], [0, 1, src_cy], [0, 0, 1]])\n return (np.matmul(t2, np.matmul(r, t1)))[0:2,0:3]\n\ndef gen_transforms(n, step):\n a = 0.0\n step = step * (math.pi/180)\n out = np.zeros([n, 2, 3])\n for i in range(n):\n out[i,:,:] = gen_transform(a, 2, 160, 120, 100, 100)\n a = a + step\n return out.astype(np.float32)\n\ndef ToCVMatrix(matrix):\n offset = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))\n result = matrix.copy()\n result[0][2] = offset[0] - 0.5\n result[1][2] = offset[1] - 0.5\n return result\n\ndef CVWarp(output_type, input_type, warp_matrix = None, inv_map = False):\n def warp_fn(img, matrix):\n size = (320, 240)\n matrix = ToCVMatrix(matrix)\n if output_type == dali.types.FLOAT or input_type == dali.types.FLOAT:\n img = np.float32(img)\n out = cv2.warpAffine(img, matrix, size, borderMode = cv2.BORDER_CONSTANT, borderValue = [42,42,42],\n flags = (cv2.INTER_LINEAR|cv2.WARP_INVERSE_MAP) if inv_map else cv2.INTER_LINEAR);\n if output_type == dali.types.UINT8 and input_type == dali.types.FLOAT:\n out = np.uint8(np.clip(out, 0, 255))\n return out\n\n if warp_matrix:\n m = np.array(warp_matrix)\n def warp_fixed(img):\n return warp_fn(img, m)\n return warp_fixed\n\n return warp_fn\n\n\nclass WarpPipeline(Pipeline):\n def __init__(self, device, batch_size, output_type, input_type, use_input, num_threads=3, device_id=0, num_gpus=1, inv_map=False):\n super(WarpPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)\n self.use_input = use_input\n self.use_dynamic_size = use_input # avoid Cartesian product\n self.name = device\n self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)\n self.decode = ops.ImageDecoder(device = \"cpu\", output_type = types.RGB)\n if input_type != dali.types.UINT8:\n self.cast = ops.Cast(device = device, dtype = input_type)\n else:\n self.cast = None\n\n static_size = None if self.use_dynamic_size else (240,320)\n\n if use_input:\n self.transform_source = ops.ExternalSource(lambda: gen_transforms(self.batch_size, 10))\n 
self.warp = ops.WarpAffine(device = device, size=static_size, fill_value = 42, dtype = output_type, inverse_map=inv_map)\n else:\n warp_matrix = (0.1, 0.9, 10, 0.8, -0.2, -20)\n self.warp = ops.WarpAffine(device = device, size=static_size, matrix = warp_matrix, fill_value = 42, dtype = output_type, inverse_map=inv_map)\n\n self.iter = 0\n\n def define_graph(self):\n self.jpegs, self.labels = self.input(name = \"Reader\")\n images = self.decode(self.jpegs)\n if self.warp.device == \"gpu\":\n images = images.gpu()\n if self.cast:\n images = self.cast(images)\n\n dynamic_size = types.Constant(np.array([240, 320], dtype=np.float32)) if self.use_dynamic_size else None\n\n if self.use_input:\n transform = self.transform_source()\n outputs = self.warp(images, transform, size = dynamic_size)\n else:\n outputs = self.warp(images, size = dynamic_size)\n return outputs\n\n\nclass CVPipeline(Pipeline):\n def __init__(self, batch_size, output_type, input_type, use_input, num_threads=3, device_id=0, num_gpus=1, inv_map=False):\n super(CVPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)\n self.use_input = use_input\n self.name = \"cv\"\n self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)\n self.decode = ops.ImageDecoder(device = \"cpu\", output_type = types.RGB)\n if self.use_input:\n self.transform_source = ops.ExternalSource(lambda: gen_transforms(self.batch_size, 10))\n self.warp = ops.PythonFunction(function=CVWarp(output_type, input_type, inv_map=inv_map))\n else:\n self.warp = ops.PythonFunction(function=CVWarp(output_type, input_type, [[0.1, 0.9, 10], [0.8, -0.2, -20]], inv_map))\n self.set_layout = ops.Reshape(layout=\"HWC\")\n self.iter = 0\n\n def define_graph(self):\n self.jpegs, self.labels = self.input(name = \"Reader\")\n images = self.decode(self.jpegs)\n if self.use_input:\n self.transform = self.transform_source()\n outputs = self.warp(images, self.transform)\n else:\n outputs = self.warp(images)\n outputs = self.set_layout(outputs)\n return outputs\n\ndef compare(pipe1, pipe2, eps):\n epoch_size = pipe1.epoch_size(\"Reader\")\n batch_size = pipe1.batch_size\n niter = (epoch_size + batch_size - 1) // batch_size\n compare_pipelines(pipe1, pipe2, batch_size, niter, eps);\n\nio_types = [\n (dali.types.UINT8, dali.types.UINT8),\n (dali.types.UINT8, dali.types.FLOAT),\n (dali.types.FLOAT, dali.types.UINT8),\n (dali.types.FLOAT, dali.types.FLOAT)\n]\n\n\ndef test_cpu_vs_cv():\n random.seed(1009)\n for batch_size in [1, 4, 19]:\n for use_input in [False, True]:\n for (itype, otype) in io_types:\n inv_map = random.choice([False, True])\n print(\"Testing cpu vs cv\",\n \"\\nbatch size: \", batch_size,\n \" matrix as input: \", use_input,\n \" input_type: \", itype,\n \" output_type: \", otype,\n \" map_inverse:\", inv_map)\n cv_pipeline = CVPipeline(batch_size, otype, itype, use_input, inv_map=inv_map);\n cv_pipeline.build();\n\n cpu_pipeline = WarpPipeline(\"cpu\", batch_size, otype, itype, use_input, inv_map=inv_map);\n cpu_pipeline.build();\n\n compare(cv_pipeline, cpu_pipeline, 8)\n\ndef test_gpu_vs_cv():\n random.seed(1007)\n for batch_size in [1, 4, 19]:\n for use_input in [False, True]:\n for (itype, otype) in io_types:\n inv_map = random.choice([False, True])\n print(\"Testing gpu vs cv\",\n \"\\nbatch size: \", batch_size,\n \" matrix as input: \", use_input,\n \" input_type: \", itype,\n \" output_type: \", otype,\n \" map_inverse:\", inv_map)\n cv_pipeline = 
CVPipeline(batch_size, otype, itype, use_input, inv_map=inv_map);\n cv_pipeline.build();\n\n gpu_pipeline = WarpPipeline(\"gpu\", batch_size, otype, itype, use_input, inv_map=inv_map);\n gpu_pipeline.build();\n\n compare(cv_pipeline, gpu_pipeline, 8)\n\ndef test_gpu_vs_cpu():\n random.seed(1005)\n for batch_size in [1, 4, 19]:\n for use_input in [False, True]:\n for (itype, otype) in io_types:\n inv_map = random.choice([False, True])\n print(\"Testing gpu vs cpu\",\n \"\\nbatch size: \", batch_size,\n \" matrix as input: \", use_input,\n \" input_type: \", itype,\n \" output_type: \", otype,\n \" map_inverse:\", inv_map)\n cpu_pipeline = WarpPipeline(\"cpu\", batch_size, otype, itype, use_input, inv_map=inv_map);\n cpu_pipeline.build();\n\n gpu_pipeline = WarpPipeline(\"gpu\", batch_size, otype, itype, use_input, inv_map=inv_map);\n gpu_pipeline.build();\n\n compare(cpu_pipeline, gpu_pipeline, 1)\n" ]
[ [ "numpy.matmul", "numpy.zeros", "numpy.float32", "numpy.clip", "numpy.array" ] ]
jiangyuang/ModelPruningLibrary
[ "9c8ba5a3c5d118f37768d5d42254711f48d88745" ]
[ "mpl/models/base_model.py" ]
[ "from abc import ABC, abstractmethod\nfrom typing import Union, Sized, List, Tuple\nfrom copy import deepcopy\n\nimport torch\nfrom torch import nn as nn\n\nfrom ..nn.linear import DenseLinear\nfrom ..nn.conv2d import DenseConv2d\nfrom .utils import collect_leaf_modules, is_parameterized\n\n\nclass BaseModel(nn.Module, ABC):\n def __init__(self):\n super(BaseModel, self).__init__()\n\n self.prunable_layers: list = []\n self.prunable_layer_prefixes: list = []\n\n def clone_from_model(self, original_model: nn.Module = None):\n # copying all submodules from original model\n for name, module in original_model._modules.items():\n self.add_module(name, deepcopy(module))\n\n def collect_prunable_layers(self) -> None:\n self.prunable_layers, self.prunable_layer_prefixes = self.find_layers(lambda x: is_parameterized(x))\n\n def convert_eligible_layers(self):\n # changing all conv2d and linear layers to customized ones\n for module_name, old_module in zip(self.prunable_layer_prefixes, self.prunable_layers):\n if isinstance(old_module, nn.Linear):\n self.set_module_by_name(module_name, DenseLinear.from_linear(old_module))\n elif isinstance(old_module, nn.Conv2d):\n self.set_module_by_name(module_name, DenseConv2d.from_conv2d(old_module))\n\n def find_layers(self, criterion) -> Tuple[List, List]:\n layers, names = [], []\n collect_leaf_modules(self, criterion, layers, names)\n return layers, names\n\n @abstractmethod\n def forward(self, inputs) -> torch.Tensor:\n pass\n\n def prune_by_threshold(self, thr_arg: Union[int, float, Sized]):\n prunable_layers = self.prunable_layers\n if isinstance(thr_arg, Sized):\n assert len(prunable_layers) == len(thr_arg)\n else:\n thr_arg = [thr_arg] * len(prunable_layers)\n for thr, layer in zip(thr_arg, prunable_layers):\n if thr is not None:\n layer.prune_by_threshold(thr)\n\n return self\n\n def prune_by_rank(self, rank_arg: Union[int, float, Sized]):\n prunable_layers = self.prunable_layers\n if isinstance(rank_arg, Sized):\n assert len(prunable_layers) == len(rank_arg)\n else:\n rank_arg = [rank_arg] * len(prunable_layers)\n for rank, layer in zip(rank_arg, prunable_layers):\n if rank is not None:\n layer.prune_by_rank(rank)\n\n return self\n\n def prune_by_pct(self, pct_arg: Union[int, float, Sized]):\n prunable_layers = self.prunable_layers\n if isinstance(pct_arg, Sized):\n assert len(prunable_layers) == len(pct_arg)\n else:\n pct_arg = [pct_arg] * len(prunable_layers)\n for pct, layer in zip(pct_arg, prunable_layers):\n if pct is not None:\n layer.prune_by_pct(pct)\n\n return self\n\n def random_prune_by_pct(self, pct_arg: Union[int, float, Sized]):\n prunable_layers = self.prunable_layers\n if isinstance(pct_arg, Sized):\n assert len(prunable_layers) == len(pct_arg)\n else:\n pct_arg = [pct_arg] * len(prunable_layers)\n for pct, layer in zip(pct_arg, prunable_layers):\n if pct is not None:\n layer.random_prune_by_pct(pct)\n\n return self\n\n def calc_num_prunable_params(self, count_bias=True, display=False):\n total_param_in_use = 0\n total_param = 0\n for layer, layer_prefix in zip(self.prunable_layers, self.prunable_layer_prefixes):\n num_bias = layer.bias.nelement() if layer.bias is not None and count_bias else 0\n num_weight = layer.num_weight\n num_params_in_use = num_weight + num_bias\n num_params = layer.weight.nelement() + num_bias\n total_param_in_use += num_params_in_use\n total_param += num_params\n\n if display:\n print(\"Layer name: {}. 
remaining/all: {}/{} = {}\".format(layer_prefix, num_params_in_use, num_params,\n num_params_in_use / num_params))\n if display:\n print(\"Total: remaining/all: {}/{} = {}\".format(total_param_in_use, total_param,\n total_param_in_use / total_param))\n return total_param_in_use, total_param\n\n def nnz(self, count_bias=True):\n # number of parameters in use in prunable layers\n return self.calc_num_prunable_params(count_bias=count_bias)[0]\n\n def nelement(self, count_bias=True):\n # number of all parameters in prunable layers\n return self.calc_num_prunable_params(count_bias=count_bias)[1]\n\n def density(self, count_bias=True):\n total_param_in_use, total_param = self.calc_num_prunable_params(count_bias=count_bias)\n return total_param_in_use / total_param\n\n def _get_module_by_list(self, module_names: List):\n module = self\n for name in module_names:\n module = getattr(module, name)\n return module\n\n def get_module_by_name(self, module_name: str):\n return self._get_module_by_list(module_name.split('.'))\n\n def set_module_by_name(self, module_name: str, new_module):\n splits = module_name.split('.')\n self._get_module_by_list(splits[:-1]).__setattr__(splits[-1], new_module)\n\n def get_mask_by_name(self, param_name: str):\n if param_name.endswith(\"bias\"): # todo\n return None\n module = self._get_module_by_list(param_name.split('.')[:-1])\n return module.mask if hasattr(module, \"mask\") else None\n\n @torch.no_grad()\n def reinit_from_model(self, final_model):\n assert isinstance(final_model, self.__class__)\n for self_layer, layer in zip(self.prunable_layers, final_model.prunable_layers):\n self_layer.mask = layer.mask.clone().to(self_layer.mask.device)\n\n def to_sparse(self):\n self_copy = deepcopy(self)\n for module_name, old_module in zip(self.prunable_layer_prefixes, self.prunable_layers):\n self_copy.set_module_by_name(module_name, old_module.to_sparse())\n self.collect_prunable_layers()\n return self_copy\n\n def to(self, *args, **kwargs):\n device = torch._C._nn._parse_to(*args, **kwargs)[0]\n if device is not None:\n # move masks to device\n for m in self.prunable_layers:\n m.move_data(device)\n return super(BaseModel, self).to(*args, **kwargs)\n" ]
[ [ "torch._C._nn._parse_to", "torch.no_grad" ] ]
veqtrus/electricitymap-contrib
[ "4ec4857da1315899159688fe1fb7f95376a6badd" ]
[ "parsers/AX.py" ]
[ "#!/usr/bin/env python3\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n\n# Numpy and PIL are used to process the image\nimport numpy as np\nfrom PIL import Image\n\n\ndef _get_masks(session=None):\n Minus = np.array([[[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255], [255, 255, 255], [255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Minus = Image.fromarray(Minus)\n \n Dot = np.array([[[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255], [255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]]], dtype=np.uint8)\n Dot = Image.fromarray(Dot)\n\n Zero = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Zero = Image.fromarray(Zero)\n\n One = np.array([[[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[0, 0, 0],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n 
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n One = Image.fromarray(One)\n\n Two = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Two = Image.fromarray(Two)\n\n Three = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype =np.uint8)\n Three = Image.fromarray(Three)\n\n Four = np.array([[[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype=np.uint8)\n Four = Image.fromarray(Four)\n\n Five = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Five = Image.fromarray(Five)\n\n Six = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 
0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0], [255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255], [255, 255, 255], [255, 255, 255]],\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 255]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0],[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 255]],\n [[255, 255, 255], [255, 255, 255], [255, 255, 255],[255, 255, 255], [255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Six = Image.fromarray(Six)\n\n Seven = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Seven = Image.fromarray(Seven)\n\n Eight = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0], [255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0], [0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255], [0, 0, 0],[0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = np.uint8)\n Eight = Image.fromarray(Eight)\n\n Nine = np.array([[[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[0, 0, 0],[0, 0, 0],[255, 255, 255],[255, 255, 255],[0, 0, 0],[0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],\n [[255, 255, 255],[0, 0, 0],[0, 0, 0],[0, 0, 0],[0, 0, 0],[255, 255, 255]],\n [[255, 255, 255],[255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255]]], dtype=np.uint8)\n Nine = Image.fromarray(Nine)\n \n shorts = ['-','.','0','1','2','3','4','5','6','7','8','9']\n masks = [Minus, Dot, Zero, One, Two, Three, Four, Five, Six, Seven, Eight, Nine]\n \n return dict(zip(shorts,masks))\n \n\ndef _fetch_data(session=None):\n # Load masks for reading numbers from the image\n # Create a dictionary of symbols and their 
pixel masks\n mapping = _get_masks(session)\n\n # Download the updating image from Kraftnät Åland\n r = session or requests.session()\n \n url = 'http://194.110.178.135/grafik/stamnat.php'\n \n im = Image.open(r.get(url, stream=True).raw)\n # Get timestamp\n fetchtime = arrow.utcnow().floor('second').to('Europe/Mariehamn')\n \n # \"data\" is a height x width x 3 RGB numpy array\n data = np.array(im) \n #red, green, blue, alpha = data.T # Temporarily unpack the bands for readability\n red, green, blue = data.T\n # Color non-blue areas in the image with white\n blue_areas = ((red == 0) & (green == 0) & (blue == 255))\n data[~blue_areas.T] = (255, 255, 255)\n # Color blue areas in the image with black\n data[blue_areas.T] = (0, 0, 0)\n \n # Transform the array back to image\n im = Image.fromarray(data)\n \n shorts = mapping.keys()\n\n # check import from Sweden\n SE3Flow = []\n for x in range(80, 130-6):\n for abr in shorts:\n im1 = im.crop((x, 443, x+6, 452))\n if im1 == mapping[abr]:\n SE3Flow.append(abr)\n SE3Flow = \"\".join(SE3Flow)\n SE3Flow = round(float(SE3Flow),1)\n\n # export Åland-Finland(Kustavi/Gustafs)\n \n GustafsFlow=[]\n for x in range(780, 825-6):\n for abr in shorts:\n im1 = im.crop((x, 43, x+6, 52))\n if im1 == mapping[abr]:\n GustafsFlow.append(abr)\n GustafsFlow = \"\".join(GustafsFlow)\n GustafsFlow = round(float(GustafsFlow),1)\n \n # Reserve cable import Naantali-Åland\n # Åland administration does not allow export\n # to Finland through this cable\n FIFlow = []\n for x in range(760, 815-6):\n for abr in shorts:\n im1 = im.crop((x, 328, x+6, 337))\n if im1 == mapping[abr]:\n FIFlow.append(abr)\n FIFlow = \"\".join(FIFlow)\n FIFlow = round(float(FIFlow),1)\n\n\n # The shown total consumption is not reliable according to the TSO\n # Consumption\n # Cons = []\n # for x in range(650, 700-6):\n # for abr in shorts:\n # im1 = im.crop((x, 564, x+6, 573))\n # if im1 == mapping[abr]:\n # Cons.append(abr)\n # Cons = \"\".join(Cons)\n # Cons = round(float(Cons),1)\n\n # Wind production\n WProd = []\n for x in range(650, 700-6):\n for abr in shorts:\n im1 = im.crop((x, 576, x+6, 585))\n if im1 == mapping[abr]:\n WProd.append(abr)\n WProd = \"\".join(WProd)\n WProd = round(float(WProd),1)\n\n # Fossil fuel production\n FProd = []\n for x in range(650, 700-6):\n for abr in shorts:\n im1 = im.crop((x, 588, x+6, 597))\n if im1 == mapping[abr]:\n FProd.append(abr)\n FProd = \"\".join(FProd)\n FProd = round(float(FProd),1)\n \n # Both are confirmed to be import from Finland by the TSO\n FIFlow = FIFlow+GustafsFlow\n \n # Calculate sum of exchanges\n SumExchanges = SE3Flow+FIFlow\n \n # Calculate total production\n TotProd = FProd+WProd\n \n # Calculate total consumption\n Cons = round(TotProd + SumExchanges,1)\n \n # The production that is not fossil fuel or wind based is unknown\n # Impossible to estimate with current data\n # UProd = TotProd - WProd - FProd\n \n obj = dict({'production':TotProd,'consumption':Cons,'wind':WProd,\n 'fossil':FProd,'SE3->AX':SE3Flow,\n 'FI->AX':FIFlow,'fetchtime':fetchtime})\n \n return obj\n\n\ndef fetch_production(zone_key='AX', session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n obj = _fetch_data(session)\n\n data = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'kraftnat.aland.fi',\n 'datetime': arrow.get(obj['fetchtime']).datetime\n 
}\n data['production']['biomass'] = None\n data['production']['coal'] = 0\n data['production']['gas'] = 0\n data['production']['hydro'] = None\n data['production']['nuclear'] = 0\n data['production']['oil'] = obj['fossil']\n data['production']['solar'] = None\n data['production']['wind'] = obj['wind']\n data['production']['geothermal'] = None\n data['production']['unknown'] = None\n \n return data\n\n\ndef fetch_consumption(zone_key='AX', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n obj = _fetch_data(session)\n \n data = {\n 'zoneKey': zone_key,\n 'datetime': arrow.get(obj['fetchtime']).datetime,\n 'consumption': obj['consumption'],\n 'source': 'kraftnat.aland.fi'\n }\n \n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None) -> dict:\n \"\"\"Requests the last known power exchange (in MW) between two countries.\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n obj = _fetch_data(session)\n\n data = {\n 'sortedZoneKeys': '->'.join(sorted([zone_key1, zone_key2])),\n 'source': 'kraftnat.aland.fi',\n 'datetime': arrow.get(obj['fetchtime']).datetime\n }\n\n # Country codes are sorted in order to enable easier indexing in the database\n sorted_zone_keys = sorted([zone_key1, zone_key2])\n # Here we assume that the net flow returned by the api is the flow from\n # country1 to country2. A positive flow indicates an export from country1\n # to country2. A negative flow indicates an import.\n \n if '->'.join(sorted([zone_key1, zone_key2])) in ['AX->SE', 'AX->SE-SE3']:\n netFlow = obj['SE3->AX']\n \n elif '->'.join(sorted([zone_key1, zone_key2]))== 'AX->FI':\n netFlow = obj['FI->AX'] # Import is positive\n \n # The net flow to be reported should be from the first country to the second\n # (sorted alphabetically). This is NOT necessarily the same direction as the flow\n # from country1 to country2\n \n # AX is before both FI and SE\n data['netFlow'] = round(-1*netFlow,1)\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_consumption() ->')\n print(fetch_consumption())\n print('fetch_exchange(AX, FI) ->')\n print(fetch_exchange('FI', 'AX'))\n print('fetch_exchange(AX, SE) ->')\n print(fetch_exchange('SE', 'AX'))\n" ]
[ [ "numpy.array" ] ]
drcastillo/hicss2020
[ "0a812257215c75054d8b891e23c933d6a8327c0d" ]
[ "utils/helpful_util.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Reference:\n\n#from __future__ import print_function\n#from utils.heaton_utils import *\n\nimport numpy as np\nimport warnings\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nimport glob\n#pip install counter\nfrom collections import Counter\n\nimport pickle\nimport sklearn\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve\n\nfrom keras.models import load_model\nfrom keras.models import model_from_json\nfrom sklearn.model_selection import train_test_split\n\nimport seaborn as sns\nfrom IPython.display import display, HTML\nfrom sklearn.metrics import classification_report\nfrom utils.perturbation import load_models_lendingclub\nfrom IPython.display import display_html, display, HTML\nimport lime.lime_tabular\nimport lime\n\nclass KerasModelUtil:\n\n modelwts_extension = \"h5\"\n json_extension = \"json\"\n pickle_extension = \"p\"\n\n def save(self, model_dir, model_name, model, label_class_map):\n if model_dir.endswith('/') == False:\n model_dir = model_dir + '/'\n\n # put the file name into specific tokens\n fn_base, sep, tail = model_name.partition('.')\n if not sep:\n sep = \".\"\n\n json_fn = model_dir + fn_base + sep + self.json_extension\n\n wt_ext = tail\n if not wt_ext:\n wt_ext = self.modelwts_extension\n wt_fn = model_dir + fn_base + sep + wt_ext\n\n pickle_fn = model_dir + fn_base + sep + self.pickle_extension\n\n pickle.dump(label_class_map, open(pickle_fn, 'wb'))\n\n # serialize model to JSON\n model_json = model.to_json()\n\n with open(json_fn, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(wt_fn)\n\n def load(self, model_dir, model_name, input_shape=(None, 224, 224, 3)):\n # Load the json model first\n if model_dir.endswith('/') == False:\n model_dir = model_dir + '/'\n\n # put the file name into specific tokens\n fn_base, sep, tail = model_name.partition('.')\n if not sep:\n sep = \".\"\n\n json_fn = model_dir + fn_base + sep + self.json_extension\n json_file = open(json_fn, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n\n # form the model from the json and rebuild the layers\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.build(input_shape=input_shape)\n\n # Load the weights\n wt_ext = tail\n if not wt_ext:\n wt_ext = self.modelwts_extension\n wt_fn = model_dir + fn_base + sep + wt_ext\n loaded_model.load_weights(wt_fn)\n\n #print(\"Loaded model from disk\")\n\n # Load the labels and Class ids\n pickle_fn = model_dir + fn_base + sep + self.pickle_extension\n label_classids = pickle.load(open(pickle_fn, \"rb\"))\n class_label_map = {v: k for k, v in label_classids.items()}\n #print(label_classids)\n #print(classids_labels)\n\n return loaded_model, class_label_map\n\n\n##################################################\n# Keras callbacks for plotting training model\n# accuracy and loss\n##################################################\nfrom IPython.display import clear_output\nimport math\nimport keras\n\n\n#Can just import LiveLossPlot & add to model callbacks.\nclass TrainingPlot(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.i = 0\n self.x = []\n self.losses = []\n self.val_losses = []\n self.acc = []\n self.val_acc = []\n\n self.logs = []\n\n def on_epoch_end(self, epoch, logs={}):\n\n self.logs.append(logs)\n self.x.append(self.i)\n self.losses.append(logs.get('loss'))\n self.val_losses.append(logs.get('val_loss'))\n 
self.acc.append(logs.get('acc'))\n self.val_acc.append(logs.get('val_acc'))\n self.i += 1\n f, (ax1, ax2) = plt.subplots(1, 2, sharex=False)\n\n clear_output(wait=True)\n\n ax1.set_yscale('log')\n ax1.plot(self.x, self.losses, label=\"training loss\")\n ax1.plot(self.x, self.val_losses, label=\"validation loss\")\n ax1.legend()\n\n ax2.set_ylim(0, 1.0)\n ax2.plot(self.x, self.acc, label=\"training accuracy\")\n ax2.plot(self.x, self.val_acc, label=\"validation accuracy\")\n ax2.legend()\n\n plt.show()\n\n\n##################################################\n# Utility code for computing a Confusion Matrix\n##################################################\n\nimport matplotlib.pyplot as plt #for plotting\nimport itertools as it\n\n\n#Note, this code is taken straight from the SKLEARN website, a nice way of viewing confusion matrix.\ndef plot_confusion_matrix(cm,\n classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n\n Note: class is a listlike parameter. Pass in list of classes, eg: [\"No Loan\", \"Loan\"]\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 2.\n for i, j in it.product(range(cm.shape[0]), range(cm.shape[1])):\n value = '{0:.2g}'.format(cm[i, j])\n plt.text(j,\n i,\n value,\n fontsize=10,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\n\n##################################################\n# Utility code for measuring model performance given dataset size\n##################################################\ndef plot_learning_curve(estimator,\n title,\n X,\n y,\n ylim=None,\n cv=None,\n n_jobs=-1,\n train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"Generate a simple plot of the test and training learning curve\"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes,\n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std,\n alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes,\n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std,\n alpha=0.1,\n color=\"g\")\n plt.plot(train_sizes,\n train_scores_mean,\n 'o-',\n color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes,\n test_scores_mean,\n 'o-',\n color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n\n\ndef display_sklearn_feature_importance(data, set, features, n_features):\n '''\n Parameters:\n data: data object; coomatrix w/ encoded features\n n_features: number of features to visualize\n set: str;\n 'lendingclub' - load lending club models\n 'uci' - load uci models\n Returns:\n Graph of basic feature importance measurements\n\n '''\n if 'uci' in set:\n 
rfc, gbc, logit, keras_ann, sk_ann = load_models_uci()\n else:\n rfc, gbc, logit, keras_ann, sk_ann = load_models_lendingclub()\n feature_importance = pd.DataFrame({\n \"feature\":\n features,\n \"RF_Feature_Importance\":\n np.round(rfc.feature_importances_, 4),\n \"GBC_Feature_Importance\":\n np.round(gbc.feature_importances_, 4),\n \"Logit_Coeff\":\n np.round(logit.coef_[0], 4),\n \"Max_Feature_Val\":\n pd.DataFrame(data.toarray(), columns=features).max(),\n })\n\n n = n_features\n feature_importance['coeff_max'] = feature_importance[\n 'Logit_Coeff'] * feature_importance['Max_Feature_Val']\n temp = feature_importance.nlargest(n, 'RF_Feature_Importance')\n sns.barplot(temp['RF_Feature_Importance'], temp['feature'])\n plt.title('Random Forest - Feature Importance Top {}'.format(n_features))\n plt.show()\n\n temp = feature_importance.nlargest(n, 'GBC_Feature_Importance')\n sns.barplot(temp['GBC_Feature_Importance'], temp['feature'])\n plt.title('Gradient Boosted Classifier - Feature Importance Top {}'.format(\n n_features))\n plt.show()\n\n #We want to show the total possible feature impact here. Take the max of each feature in the training set by the logit coeff.\n lookup = pd.DataFrame(data.toarray(), columns=features).max()\n temp = feature_importance.nlargest(int(n / 2), 'coeff_max')\n temp1 = feature_importance.nsmallest(int(n / 2), 'coeff_max')\n temp = pd.concat([temp, temp1])\n sns.barplot(temp['coeff_max'], temp['feature'])\n plt.title('Logistic Regression - Coefficients Top&Bottom {}'.format(\n int(n_features / 2)))\n plt.show()\n\n\ndef get_best_score(x, y):\n try:\n return sklearn.metrics.accuracy_score(x, y.predict(encoded_test))\n except:\n return sklearn.metrics.accuracy_score(x, keras_ann.predict_classes(encoded_test.toarray()))\n\n\ndef display_side_by_side(*args):\n html_str = ''\n for df in args:\n html_str += df.to_html()\n display_html(html_str.replace('table', 'table style=\"display:inline\"'),\n raw=True)\n\ndef neg_pos_logit_coefficients(model, features):\n logistic_regress_coeff = pd.DataFrame({\n \"features\": features,\n \"Coef\": model.coef_[0]\n })\n\n neg_coef = round(logistic_regress_coeff[\n logistic_regress_coeff['Coef'] < 0].sort_values('Coef', ascending=True),2).head(15)\n pos_coef = round(logistic_regress_coeff[\n logistic_regress_coeff['Coef'] > 0].sort_values('Coef', ascending=False),2).head(15)\n display_side_by_side(neg_coef, pos_coef)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "sklearn.model_selection.learning_curve", "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.text", "numpy.linspace", "numpy.mean", "matplotlib.pyplot.subplots", "pandas.concat", "matplotlib.pyplot.ylim", "numpy.std", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "pandas.DataFrame", "matplotlib.pyplot.show", "numpy.round", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
shday/dash-bio-1
[ "81bb6fa257febb59d7841f8c5573e7231f5a9095" ]
[ "tests/integration/test_clustergram.py" ]
[ "import json\nimport pandas as pd\n\nimport dash\nimport dash_html_components as html\nimport dash_bio\n\nfrom common_features import nested_component_layout, nested_component_app_callback\n\n_data = None\n\n_mtcars_data = pd.read_csv(\n \"tests/dashbio_demos/dash-clustergram/data/mtcars.tsv\", delimiter=\"\\t\", skiprows=4\n).set_index(\"model\")\n\n_data = _mtcars_data.values\n\n\ndef test_dbcl001_colorscale(dash_duo):\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"color_map\",\n test_prop_value=json.dumps([[0, \"blue\"], [0.5, \"yellow\"], [1, \"pink\"]]),\n prop_value_type=\"list\",\n path_to_test_prop='[\"data\"][41][\"colorscale\"]',\n take_snapshot=True,\n )\n\n\ndef test_dbcl002_cluster_by_row_or_col(dash_duo):\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"cluster\",\n test_prop_value=\"row\",\n prop_value_type=\"string\",\n )\n\n assert len(dash_duo.find_elements(\"g.subplot.x2y2\")) == 0\n assert len(dash_duo.find_elements(\"g.subplot.x4y4\")) == 1\n\n # create a new instance of the app to test column clustering\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"cluster\",\n test_prop_value=\"col\",\n prop_value_type=\"string\",\n take_snapshot=True,\n )\n\n assert len(dash_duo.find_elements(\"g.subplot.x4y4\")) == 0\n assert len(dash_duo.find_elements(\"g.subplot.x2y2\")) == 1\n\n\ndef test_dbcl003_row_col_thresholds(dash_duo):\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"color_threshold\",\n test_prop_value=json.dumps({\"row\": 250, \"col\": 700}),\n prop_value_type=\"dict\",\n take_snapshot=True,\n )\n\n # there should be 9 traces for the column dendrogram\n # plus one trace for the background\n assert len(dash_duo.find_elements(\"g.subplot.x2y2 > g.plot g.trace.scatter\")) == 10\n\n # 30 traces for the row dendrogram, plus one for the background\n assert len(dash_duo.find_elements(\"g.subplot.x4y4 > g.plot g.trace.scatter\")) == 31\n\n\ndef test_dbcl004_col_annotations(dash_duo):\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"col_group_marker\",\n test_prop_value=json.dumps(\n [{\"group\": 1, \"annotation\": \"cluster one\", \"color\": \"rgb(62, 248, 199)\"}]\n ),\n extra_props={\"color_threshold\": {\"row\": 250, \"col\": 700}},\n prop_value_type=\"list\",\n take_snapshot=True,\n )\n\n # the annotation has shown up\n assert len(dash_duo.find_elements(\"g.subplot.x8y8\")) == 1\n\n # the annotation is the correct color\n dash_duo.wait_for_style_to_equal(\n \"g.subplot.x8y8 g.plot g.lines > path\", \"stroke\", \"rgb(62, 248, 199)\"\n )\n\n\ndef test_dbcl005_row_annotations(dash_duo):\n\n app = 
dash.Dash(__name__)\n\n app.layout = html.Div(nested_component_layout(dash_bio.Clustergram(data=_data)))\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"row_group_marker\",\n test_prop_value=json.dumps(\n [{\"group\": 2, \"annotation\": \"cluster two\", \"color\": \"rgb(248, 62, 199)\"}]\n ),\n extra_props={\"color_threshold\": {\"row\": 250, \"col\": 700}},\n prop_value_type=\"list\",\n take_snapshot=True,\n )\n\n # the annotation has shown up\n assert len(dash_duo.find_elements(\"g.subplot.x6y6\")) == 1\n\n # the annotation is the correct color\n dash_duo.wait_for_style_to_equal(\n \"g.subplot.x6y6 g.plot g.lines > path\", \"stroke\", \"rgb(248, 62, 199)\"\n )\n\n\ndef test_dbcl006_df_input_row_cluster(dash_duo):\n\n app = dash.Dash(__name__)\n\n # run the same test as dbcl002 (row clustering) where table of\n # observations (data argument) is left as a DataFrame\n assert isinstance(_mtcars_data, pd.DataFrame)\n app.layout = html.Div(\n nested_component_layout(dash_bio.Clustergram(data=_mtcars_data))\n )\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=_data,\n test_prop_name=\"cluster\",\n test_prop_value=\"row\",\n prop_value_type=\"string\",\n )\n\n assert len(dash_duo.find_elements(\"g.subplot.x2y2\")) == 0\n assert len(dash_duo.find_elements(\"g.subplot.x4y4\")) == 1\n\n\ndef test_dbcl007_hidden_labels(dash_duo):\n\n app = dash.Dash(__name__)\n\n data = _mtcars_data\n row_labels = list(_mtcars_data.index)\n col_labels = list(_mtcars_data.columns)\n\n app.layout = html.Div(\n nested_component_layout(\n dash_bio.Clustergram(\n data=data, row_labels=row_labels, column_labels=col_labels\n )\n )\n )\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=data,\n test_prop_name=\"hidden_labels\",\n test_prop_value=\"row\",\n prop_value_type=\"string\",\n )\n\n # ensure that row labels are hidden\n assert len(dash_duo.find_elements(\"g.yaxislayer-above g.y5tick\")) == 0\n # ensure that column labels are displayed\n assert len(dash_duo.find_elements(\"g.xaxislayer-above g.x5tick\")) == len(col_labels)\n\n # create a new instance of the app to test hiding of column labels\n\n app = dash.Dash(__name__)\n\n app.layout = html.Div(\n nested_component_layout(\n dash_bio.Clustergram(\n data=data, row_labels=row_labels, column_labels=col_labels\n )\n )\n )\n\n nested_component_app_callback(\n app,\n dash_duo,\n component=dash_bio.Clustergram,\n component_data=data,\n test_prop_name=\"hidden_labels\",\n test_prop_value=\"col\",\n prop_value_type=\"string\",\n )\n\n # ensure that column labels are hidden\n assert len(dash_duo.find_elements(\"g.xaxislayer-above g.x5tick\")) == 0\n # ensure that row labels are displayed\n assert len(dash_duo.find_elements(\"g.yaxislayer-above g.y5tick\")) == len(row_labels)\n" ]
[ [ "pandas.read_csv" ] ]
Tim15/MLib
[ "2222dc67ec0cbaf07942371120be1690359e0ab3" ]
[ "MLLib/NeuralNet/net.py" ]
[ "# import curses\r\n# import datetime\r\n#\r\n# stdscr = curses.initscr()\r\n# curses.noecho()\r\n# stdscr.nodelay(1) # set getch() non-blocking\r\n#\r\n# stdscr.addstr(0,0,\"Press \\\"p\\\" to show count, \\\"q\\\" to exit...\")\r\n# line = 1\r\n# try:\r\n# while 1:\r\n# c = stdscr.getch()\r\n# if c == ord('p'):\r\n# stdscr.addstr(line,0,\"Some text here\")\r\n# line += 1\r\n# elif c == ord('q'): break\r\n#\r\n# \"\"\"\r\n# Do more things\r\n# \"\"\"\r\n#\r\n# finally:\r\n# curses.endwin()\r\nimport numpy as np\r\ndef nonlin(x, deriv=False):\r\n if deriv:\r\n return (x*(1-x))\r\n return 1/(1+np.exp(-x))\r\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\r\ny = np.array([[0],[1],[1],[0]])\r\nnp.random.seed(1)\r\nnums = [2, 4, 1]\r\nnetwork = [2*np.random.random((nums[i]+1,nums[i+1]))-1 for i in range(len(nums)-1)]\r\nprint('network', network)\r\nfor j in range(100000):\n outputs = [X]\n for layer in network:\n outputs[-1] = np.c_[outputs[-1], np.ones(len(outputs[-1]))]\n outputs.append(nonlin(np.dot(outputs[-1], layer)))\r\n print('outputs', outputs, '\\n')\r\n errors = [y - outputs[2]]\r\n print('errors', errors)\r\n # if(j % 100000) == 0: # Only print the error every 10000 steps, to save time and limit the amount of output.\r\n # print('outputs, prediction', l0, l1, l2, y, l2.shape)\r\n # print('weights', self.network0, self.network1)\r\n # print(\"Error: \" + str(np.mean(np.abs(errors[2]))))\n # print('Training input l0:', l0, '\\nDot product between training and rand:', np.dot(l0, self.network0), 'non linear dot product l1:', l1, '\\n dot product between l1, and self.network1:', np.dot(l1, self.network1), 'nonlinear dot product between l1, and self.network1:', l2, 'input and output training data: ', self.network0, self.network1, errors[2], nonlin(l2, deriv=True))\r\n deltas = [errors[-1]*nonlin(outputs[2], deriv=True)]\r\n print('deltas', deltas)\r\n # if(j % 100000) == 0:\r\n # print('l2Error, nonlin\\'(l2)', errors[2], nonlin(l2, deriv=True))\r\n # print('l2Delta, self.network1.t', l2_delta, self.network1.T)\r\n for i in range(len(network)-1):\r\n errors.insert(0, deltas[0].dot(network[i+1].T))\r\n print('layer', i, 'error', errors[0])\r\n # if(j % 100000) == 0:\r\n # print('l1Error', errors[1])\r\n # print(nonlin(outputs[i+1],deriv=True))\r\n deltas.insert(0, errors[0] * nonlin(outputs[i+1],deriv=True))\r\n print('layer', i, 'delta', deltas[0], '\\n')\r\n # if(j % 100000) == 0:\r\n # print('self.network1, l1.T, l2Delta', network[1].shape, outputs[1].T.shape, deltas[1].shape)\r\n # if(j % 100000) == 0:\r\n # print('self.network0, l0.T, l1Delta', network[0].shape, outputs[0].T.shape, deltas[0].shape)\r\n #update weights (no learning rate term)\r\n for i in range(len(deltas)):\r\n delta = outputs[i].T.dot(deltas[i])\r\n print(delta,'\\n', network[i])\r\n network[i] += delta\r\n\r\nprint(\"Output after training\")\r\nprint(outputs[2])\r\n" ]
[ [ "numpy.random.seed", "numpy.exp", "numpy.random.random", "numpy.array", "numpy.dot" ] ]
zakerifahimeh/FaceLib
[ "bf8eadc26baf04907e3800ada02896ac7056080c" ]
[ "facelib/InsightFace/models/data/config.py" ]
[ "from easydict import EasyDict as edict\nfrom pathlib import Path\nimport torch\nfrom torch.nn import CrossEntropyLoss\n\n\ndef get_config(training=True):\n conf = edict()\n conf.data_path = Path('models/data')\n conf.work_path = Path('weights/')\n conf.model_path = conf.work_path / 'models'\n conf.log_path = conf.work_path / 'log'\n conf.save_path = conf.work_path\n conf.input_size = [112, 112]\n conf.embedding_size = 512\n conf.use_mobilfacenet = False\n conf.net_depth = 50\n conf.drop_ratio = 0.6\n conf.net_mode = 'ir_se' # or 'ir'\n conf.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n conf.data_mode = 'emore'\n conf.vgg_folder = conf.data_path / 'faces_vgg_112x112'\n conf.ms1m_folder = conf.data_path / 'faces_ms1m_112x112'\n conf.emore_folder = conf.data_path / 'faces_emore'\n conf.batch_size = 100 # irse net depth 50\n # conf.batch_size = 200 # mobilefacenet\n # --------------------Training Config ------------------------\n if training:\n conf.log_path = conf.work_path / 'log'\n conf.save_path = conf.work_path / 'save'\n # conf.weight_decay = 5e-4\n conf.lr = 1e-3\n conf.momentum = 0.9\n conf.pin_memory = True\n # conf.num_workers = 4 # when batchsize is 200\n conf.num_workers = 3\n conf.ce_loss = CrossEntropyLoss()\n # --------------------Inference Config ------------------------\n else:\n conf.facebank_path = conf.data_path / 'facebank'\n conf.threshold = 1.5\n conf.face_limit = 10\n # when inference, at maximum detect 10 faces in one image, my laptop is slow\n return conf\n" ]
[ [ "torch.cuda.is_available", "torch.nn.CrossEntropyLoss" ] ]
FurkanCan-eee/Convolutional-Neural-Network
[ "e7ee0d94075fa724d394b6a3c32e1b5abfe67285" ]
[ "Transfer Learning with MobileNetV2/MobileNetV2.py" ]
[ "# Packages\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport tensorflow.keras.layers as tfl\n\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\nfrom tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation\n\n# Creating the dataset and splitting it into training and validation sets\n\nBATCH_SIZE = 32\nIMG_SIZE = (160, 160)\ndirectory = \"dataset/\" # One can change this directory\ntrain_dataset = image_dataset_from_directory(directory,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE,\n validation_split=0.2,\n subset='training',\n seed=42)\nvalidation_dataset = image_dataset_from_directory(directory,\n shuffle=True,\n batch_size=BATCH_SIZE,\n image_size=IMG_SIZE,\n validation_split=0.2,\n subset='validation',\n seed=42)\n\n# Some of the images from the training set\n\nclass_names = train_dataset.class_names\n\nplt.figure(figsize=(10, 10))\nfor images, labels in train_dataset.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")\n \n#### Preprocessing and Augmentation of the data ####\n\n\"\"\"\ndataset.prefetch() is an important extra step in data preprocessing.\nUsing prefetch() prevents a memory bottleneck that can occur when reading from disk.\n\"\"\"\nAUTOTUNE = tf.data.experimental.AUTOTUNE\ntrain_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)\n\n# Function for data augmentation\n\ndef data_augmenter():\n '''\n Create a Sequential model composed of 2 layers\n Returns:\n tf.keras.Sequential\n '''\n \n data_augmentation = tf.keras.Sequential()\n data_augmentation.add(RandomFlip(\"horizontal\"))\n data_augmentation.add(RandomRotation(0.2))\n \n return data_augmentation\n \naugmenter = data_augmenter()\n\n\"\"\"\n# Testing the data_augmenter() function with one image\n\ndata_augmentation = data_augmenter()\n\nfor image, _ in train_dataset.take(1):\n plt.figure(figsize=(10, 10))\n first_image = image[0]\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n augmented_image = data_augmentation(tf.expand_dims(first_image, 0))\n plt.imshow(augmented_image[0] / 255)\n plt.axis('off')\n\"\"\"\n\n# Since we're using a pre-trained model that was trained on the normalization \n# values [-1,1], it's best practice to reuse that standard.\n\npreprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\n\n# Let's try to train our base model using all the layers and weights from the pretrained model.\n\nIMG_SHAPE = IMG_SIZE + (3,)\nbase_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=True,\n weights='imagenet')\n\n# One can check model summary\n\n# base_model.summary()\n\n\"\"\"\nNote the last 2 layers here. They are so called top layers,\nand they are responsible of the classification in the model.\n\"\"\"\n# Their names\n\n# nb_layers = len(base_model.layers)\n# print(base_model.layers[nb_layers - 2].name)\n# print(base_model.layers[nb_layers - 1].name)\n\n# choosing the first batch from the tensorflow dataset to use the images, and runing \n# it through the MobileNetV2 base model to test out the predictions on some of our images.\n\n\"\"\"\nWe can see that shape of the batch is (32,1000). 
The number 32 refers to the batch size and \n1000 refers to the 1000 classes the model was pretrained on.\n\"\"\"\nimage_batch, label_batch = next(iter(train_dataset))\nfeature_batch = base_model(image_batch)\nprint(feature_batch.shape)\n\n# Decoding the predictions made by the model\n\n\"\"\"\nThe predictions returned by the base model below follow this format:\nFirst the class number, then a human-readable label, and last the probability of the image \nbelonging to that class. You'll notice that there are two of these returned for each image \nin the batch - these the top two probabilities returned for that image.\n\nThere's a whole lot of labels here, some of them hilariously wrong, but none of them say \"alpaca.\"\nThis is because MobileNet pretrained over ImageNet doesn't have the correct labels for alpacas,\nso when we use the full model, all we get is a bunch of incorrectly classified images.\n\nFortunately, you can delete the top layer, which contains all the classification labels,\nand create a new classification layer.\n\"\"\"\nbase_model.trainable = False\nimage_var = tf.Variable(image_batch)\npred = base_model(image_var)\n\ntf.keras.applications.mobilenet_v2.decode_predictions(pred.numpy(), top=2)\n\n# Alpaca model\n\n\"\"\"\nWe can use a pretrained model to modify the classifier task so that it's able to recognize alpacas.\nWe can achieve this in three steps:\n1- Delete the top layer (the classification layer)\n * Set include_top in base_model as False\n2- Add a new classifier layer\n * Train only one layer by freezing the rest of the network\n * A single neuron is enough to solve a binary classification problem.\n3- Freeze the base model and train the newly-created classifier layer\n * Set base model.trainable=False to avoid changing the weights and train only the new layer\n * Set training in base_model to False to avoid keeping track of statistics in the batch norm layer\n\"\"\"\ndef alpaca_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):\n ''' Define a tf.keras model for binary classification out of the MobileNetV2 model\n Arguments:\n image_shape -- Image width and height\n data_augmentation -- data augmentation function\n Returns:\n Returns:\n tf.keras.model\n '''\n \n \n input_shape = image_shape + (3,)\n \n base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,\n include_top=False, # <== Important!!!!\n weights='imagenet') # From imageNet\n \n # Freeze the base model by making it non trainable\n base_model.trainable = False\n \n # create the input layer (Same as the imageNetv2 input size)\n inputs = tf.keras.Input(shape=input_shape) \n \n # apply data augmentation to the inputs\n x = data_augmentation(inputs)\n \n # data preprocessing using the same weights the model was trained on\n # Already Done -> preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\n x = preprocess_input(x) \n \n # set training to False to avoid keeping track of statistics in the batch norm layer\n x = base_model(x, training=False) \n \n # Add the new Binary classification layers\n # use global avg pooling to summarize the info in each channel\n x = tfl.GlobalAveragePooling2D()(x) \n #include dropout with probability of 0.2 to avoid overfitting\n x = tfl.Dropout(0.2)(x)\n \n # create a prediction layer with one neuron (as a classifier only needs one)\n prediction_layer = tfl.Dense(1)\n \n outputs = prediction_layer(x) \n model = tf.keras.Model(inputs, outputs)\n \n return model\n\ndata_augmentation = data_augmenter()\nmodel2 = 
alpaca_model(IMG_SIZE, data_augmentation)\n\n# Compiling the model\n\nbase_learning_rate = 0.01\nmodel2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n# Running the model\n\ninitial_epochs = 5\nhistory = model2.fit(train_dataset, validation_data=validation_dataset, epochs=initial_epochs)\n\n# Plotting the training and validation accuracy\n\nacc = [0.] + history.history['accuracy']\nval_acc = [0.] + history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nplt.figure(figsize=(8, 8))\nplt.subplot(2, 1, 1)\nplt.plot(acc, label='Training Accuracy')\nplt.plot(val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.ylabel('Accuracy')\nplt.ylim([min(plt.ylim()),1])\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(2, 1, 2)\nplt.plot(loss, label='Training Loss')\nplt.plot(val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.ylabel('Cross Entropy')\nplt.ylim([0,1.0])\nplt.title('Training and Validation Loss')\nplt.xlabel('epoch')\nplt.show()\n\n# Printing class names\n\nprint(class_names)\n\n# Fine-tuning the model to increase the accuracy\n\nbase_model.trainable = True\n# Let's take a look to see how many layers are in the base model\nprint(\"Number of layers in the base model: \", len(base_model.layers))\n\n# Fine-tune from this layer onwards\nfine_tune_at = 126\n\n##### One Way ########\n\"\"\"\nfor layer in base_model.layers:\n if layer.name == 'block_16_expand':\n break\n layer.trainable = False\n print('Layer ' + layer.name + ' frozen.')\n\"\"\"\n######## Other Way ######\n# Freeze all the layers before the `fine_tune_at` layer\nfor layer in base_model.layers[:fine_tune_at]:\n #print('Layer ' + layer.name + ' frozen.')\n layer.trainable = False\n \n# Define a BinaryCrossentropy loss function. 
Use from_logits=True\nloss_function= tf.keras.losses.BinaryCrossentropy(from_logits=True)\n# Define an Adam optimizer with a learning rate of 0.1 * base_learning_rate\noptimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate*0.1)# 0.001\n# Use accuracy as evaluation metric\nmetrics=['accuracy']\n\nmodel2.compile(loss=loss_function,\n optimizer = optimizer,\n metrics=metrics)\n\nfine_tune_epochs = 5\ntotal_epochs = initial_epochs + fine_tune_epochs\n\nhistory_fine = model2.fit(train_dataset,\n epochs=total_epochs,\n initial_epoch=history.epoch[-1],\n validation_data=validation_dataset)\n\n# Plotting the training and validation accuracy after fine-tunning process\n\nacc += history_fine.history['accuracy']\nval_acc += history_fine.history['val_accuracy']\n\nloss += history_fine.history['loss']\nval_loss += history_fine.history['val_loss']\nplt.figure(figsize=(8, 8))\nplt.subplot(2, 1, 1)\nplt.plot(acc, label='Training Accuracy')\nplt.plot(val_acc, label='Validation Accuracy')\nplt.ylim([0, 1])\nplt.plot([initial_epochs-1,initial_epochs-1],\n plt.ylim(), label='Start Fine Tuning')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(2, 1, 2)\nplt.plot(loss, label='Training Loss')\nplt.plot(val_loss, label='Validation Loss')\nplt.ylim([0, 1.0])\nplt.plot([initial_epochs-1,initial_epochs-1],\n plt.ylim(), label='Start Fine Tuning')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.xlabel('epoch')\nplt.show()\n\n\"\"\"\nWhat we should remember:\n\n - To adapt the classifier to new data: Delete the top layer, add a new \n classification layer, and train only on that layer\n - When freezing layers, avoid keeping track of statistics (like in the \n batch normalization layer)\n - Fine-tune the final layers of your model to capture high-level details \n near the end of the network and potentially improve accuracy\n\"\"\"\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.ylabel", "tensorflow.Variable", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "tensorflow.keras.Sequential", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.experimental.preprocessing.RandomRotation", "tensorflow.keras.Input", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.experimental.preprocessing.RandomFlip", "matplotlib.pyplot.axis", "tensorflow.keras.losses.BinaryCrossentropy", "matplotlib.pyplot.ylim", "tensorflow.keras.preprocessing.image_dataset_from_directory", "tensorflow.keras.layers.GlobalAveragePooling2D", "matplotlib.pyplot.legend", "tensorflow.keras.applications.MobileNetV2", "tensorflow.keras.Model", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel" ] ]
xiacijie/dace
[ "2d942440b1d7b139ba112434bfa78f754e10bfe5" ]
[ "samples/simple/matmul.py" ]
[ "# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nfrom __future__ import print_function\n\nimport argparse\nimport dace\nimport numpy as np\nfrom typing import List\n\n# For optimizations\nfrom dace.transformation.dataflow import (DoubleBuffering, MapCollapse,\n MapExpansion, MapReduceFusion,\n StripMining, InLocalStorage,\n AccumulateTransient, Vectorization)\nfrom dace.transformation.interstate import FPGATransformSDFG\nfrom dace.transformation import helpers as xfutil\n\n# For library node implementations\nimport dace.libraries.blas\n\n# Define symbolic sizes for arbitrary inputs\nM = dace.symbol('M')\nK = dace.symbol('K')\nN = dace.symbol('N')\n\n# Define data type to use\ndtype = dace.float64\nnp_dtype = np.float64\n\n#####################################################################\n# Data-centric functions\n\n\n# Map-Reduce version of matrix multiplication\[email protected]\ndef matmul(A: dtype[M, K], B: dtype[K, N], C: dtype[M, N]):\n tmp = np.ndarray([M, N, K], dtype=A.dtype)\n\n # Multiply every pair of values to a large 3D temporary array\n for i, j, k in dace.map[0:M, 0:N, 0:K]:\n with dace.tasklet:\n in_A << A[i, k]\n in_B << B[k, j]\n out >> tmp[i, j, k]\n\n out = in_A * in_B\n\n # Sum last dimension of temporary array to obtain resulting matrix\n dace.reduce(lambda a, b: a + b, tmp, C, axis=2, identity=0)\n\n\n# Library node version of matrix multiplication, using the numpy interface\[email protected]\ndef matmul_lib(A: dtype[M, K], B: dtype[K, N]):\n return A @ B\n\n\n#####################################################################\n# Data-centric optimization helpers\n\n\ndef find_map_by_param(sdfg: dace.SDFG, pname: str) -> dace.nodes.MapEntry:\n \"\"\" Finds the first map entry node by the given parameter name. \"\"\"\n return next(n for n, _ in sdfg.all_nodes_recursive()\n if isinstance(n, dace.nodes.MapEntry) and pname in n.params)\n\n\ndef find_mapexit_by_param(sdfg: dace.SDFG, pname: str) -> dace.nodes.MapExit:\n \"\"\" Finds the first map exit node by the given parameter name. \"\"\"\n state, entry = next(\n (p, n) for n, p in sdfg.all_nodes_recursive()\n if isinstance(n, dace.nodes.MapEntry) and pname in n.params)\n return state.exit_node(entry)\n\n\n#####################################################################\n# Matrix multiplication data-centric optimization schemes\n\n\ndef optimize_for_cpu(sdfg: dace.SDFG, m: int, n: int, k: int):\n \"\"\" Optimize the matrix multiplication example for multi-core CPUs. 
\"\"\"\n # Ensure integers are 32-bit by default\n dace.Config.set('compiler', 'default_data_types', value='C')\n\n # Fuse the map and reduce nodes\n sdfg.apply_transformations(MapReduceFusion)\n\n # Find multiplication map\n entry = find_map_by_param(sdfg, 'k')\n\n # Create a tiling strategy\n divides_evenly = (m % 32 == 0) and (n % 32 == 0) and (k % 256 == 0)\n xfutil.tile(sdfg, entry, divides_evenly, False, k=256, i=32, j=32)\n xfutil.tile(sdfg, entry, divides_evenly, divides_evenly, j=16, i=4)\n\n # Reorder internal map to \"k,i,j\"\n xfutil.permute_map(entry, [2, 0, 1])\n\n # Add local storage for B in j tile: we apply InLocalStorage with a\n # parameter \"array\" named B, between the two maps of j and i\n regtile_j = find_map_by_param(sdfg, 'tile1_j')\n regtile_i = find_map_by_param(sdfg, 'tile1_i')\n InLocalStorage.apply_to(sdfg,\n dict(array='B'),\n node_a=regtile_j,\n node_b=regtile_i)\n\n if divides_evenly:\n # Add local storage for C\n exit_inner = find_mapexit_by_param(sdfg, 'k')\n exit_rti = find_mapexit_by_param(sdfg, 'tile1_i')\n AccumulateTransient.apply_to(sdfg,\n dict(array='C', identity=0),\n map_exit=exit_inner,\n outer_map_exit=exit_rti)\n\n # Vectorize microkernel map\n postamble = n % 4 != 0\n sdfg.apply_transformations(\n Vectorization,\n dict(vector_len=4, preamble=False, postamble=postamble))\n\n # Mark outer tile map as sequential to remove atomics\n find_map_by_param(sdfg,\n 'tile_k').map.schedule = dace.ScheduleType.Sequential\n\n # Collapse maps for more parallelism\n find_map_by_param(sdfg, 'o0').map.collapse = 2\n tile_i = find_map_by_param(sdfg, 'tile_i')\n tile_j = find_map_by_param(sdfg, 'tile_j')\n MapCollapse.apply_to(sdfg, _outer_map_entry=tile_i, _inner_map_entry=tile_j)\n tile_ij = find_map_by_param(sdfg, 'tile_i') # Find newly created map\n tile_ij.map.schedule = dace.ScheduleType.CPU_Multicore\n tile_ij.map.collapse = 2\n\n\ndef optimize_for_gpu(sdfg: dace.SDFG, m: int, n: int, k: int):\n \"\"\" Optimize the matrix multiplication example for GPUs. 
\"\"\"\n # Ensure integers are 32-bit by default\n dace.Config.set('compiler', 'default_data_types', value='C')\n\n # Fuse the map and reduce nodes\n sdfg.apply_transformations(MapReduceFusion)\n\n # Apply GPU transformation\n sdfg.apply_gpu_transformations()\n\n # Find multiplication map\n entry = find_map_by_param(sdfg, 'k')\n\n # Create a tiling strategy\n divides_evenly = (m % 64 == 0) and (n % 64 == 0) and (k % 8 == 0)\n xfutil.tile(sdfg, entry, divides_evenly, True, i=64, j=64, k=8)\n xfutil.tile(sdfg, entry, divides_evenly, True, i=8, j=4)\n\n # Create kernel schedule by collapsing and reordering maps\n gtile_i = find_map_by_param(sdfg, 'tile_i')\n gtile_j = find_map_by_param(sdfg, 'tile_j')\n btile_i = find_map_by_param(sdfg, 'tile1_i')\n btile_j = find_map_by_param(sdfg, 'tile1_j')\n MapCollapse.apply_to(sdfg,\n _outer_map_entry=gtile_i,\n _inner_map_entry=gtile_j)\n MapCollapse.apply_to(sdfg,\n _outer_map_entry=btile_i,\n _inner_map_entry=btile_j)\n btile = find_map_by_param(sdfg, 'tile1_i')\n btile.map.schedule = dace.ScheduleType.GPU_ThreadBlock\n\n # Add local storage (shared memory) for A and B on GPU\n ktile = find_map_by_param(sdfg, 'tile_k')\n smem_a = InLocalStorage.apply_to(sdfg,\n dict(array='A'),\n node_a=ktile,\n node_b=btile)\n smem_b = InLocalStorage.apply_to(sdfg,\n dict(array='B'),\n node_a=ktile,\n node_b=btile)\n sdfg.arrays[smem_a.data].storage = dace.StorageType.GPU_Shared\n sdfg.arrays[smem_b.data].storage = dace.StorageType.GPU_Shared\n\n # Add local storage (registers) for A and B\n ttile = find_map_by_param(sdfg, 'k')\n warptile, ttile = xfutil.extract_map_dims(sdfg, ttile, [2])\n InLocalStorage.apply_to(sdfg,\n dict(array='trans_gpu_A'),\n node_a=warptile,\n node_b=ttile)\n InLocalStorage.apply_to(sdfg,\n dict(array='trans_gpu_B'),\n node_a=warptile,\n node_b=ttile)\n\n # Add local storage (registers) for C\n state = next(s for s in sdfg.nodes() if warptile in s.nodes())\n warptile_exit = state.exit_node(warptile)\n btile_exit = state.exit_node(btile)\n AccumulateTransient.apply_to(sdfg,\n map_exit=warptile_exit,\n outer_map_exit=btile_exit)\n # Set C tile to zero on allocation\n c_access = next(n for n in state.data_nodes() if n.data == 'trans_gpu_C')\n c_access.setzero = True\n\n # Unroll microkernel maps\n ttile.map.unroll = True\n\n # Apply double-buffering on shared memory\n DoubleBuffering.apply_to(sdfg, _map_entry=ktile, _transient=smem_a)\n\n\n#####################################################################\n# Main function\n\nif __name__ == \"__main__\":\n # Arugments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-M\", type=int, nargs=\"?\", default=64)\n parser.add_argument(\"-K\", type=int, nargs=\"?\", default=64)\n parser.add_argument(\"-N\", type=int, nargs=\"?\", default=64)\n parser.add_argument('--version',\n choices=[\n 'unoptimized', 'optimize_cpu', 'optimize_gpu',\n 'mkl', 'cublas', 'fpga_naive', 'fpga_library'\n ],\n default='unoptimized',\n help='''Different available versions:\nunoptimized: Run `matmul` without optimizations;\noptimize_cpu: Transform `matmul` to a reasonably-optimized version for\n multicore CPU;\noptimize_gpu: Transform `matmul` to a reasonably-optimized version for GPU;\nmkl: Use `matmul_lib` with the MKL library node implementation;\ncublas: Use `matmul_lib` with the CUBLAS library node implementation.''')\n parser.add_argument('--noverify',\n dest='verify',\n action='store_false',\n help=\"If set, skips numpy verification.\",\n default=True)\n\n args = vars(parser.parse_args())\n version = 
args[\"version\"]\n\n # Prepare data with numpy\n m = args[\"M\"]\n k = args[\"K\"]\n n = args[\"N\"]\n A = np.random.rand(m, k).astype(np_dtype)\n B = np.random.rand(k, n).astype(np_dtype)\n C = np.zeros((m, n), dtype=np_dtype)\n\n print(f'Matrix multiplication {m}x{k}x{n} (version: {version})')\n\n if version == 'unoptimized':\n # Simply call the program to run it\n matmul(A, B, C)\n elif version.startswith('optimize_'):\n # Get the SDFG from the program\n sdfg: dace.SDFG = matmul.to_sdfg()\n # Call transformations to optimize\n if version == 'optimize_cpu':\n optimize_for_cpu(sdfg, m, n, k)\n elif version == 'optimize_gpu':\n optimize_for_gpu(sdfg, m, n, k)\n # Invoke the SDFG to run the optimized program (notice that now we must\n # also directly feed in the symbols)\n sdfg(A=A, B=B, C=C, M=m, N=n, K=k)\n elif version == 'mkl':\n # Set default implementation to MKL\n dace.libraries.blas.default_implementation = 'MKL'\n # Call program\n C = matmul_lib(A, B)\n elif version == 'cublas':\n # Set default implementation to CUBLAS\n dace.libraries.blas.default_implementation = 'cuBLAS'\n # Call program\n C = matmul_lib(A, B)\n elif version == 'fpga_naive':\n matmul = matmul.to_sdfg()\n matmul.apply_transformations(FPGATransformSDFG)\n matmul(A=A, B=B, C=C, N=n, K=k, M=m)\n elif version == 'fpga_systolic':\n dace.libraries.blas.default_implementation = 'FPGA1DSystolic'\n C = matmul_lib(A, B)\n else:\n raise ValueError('Invalid version %s' % version)\n\n if args[\"verify\"]:\n expected = A @ B\n diff = np.linalg.norm(C - expected) / (m * n)\n print('Difference:', diff)\n exit(0 if diff <= 1e-6 else 1)\n" ]
[ [ "numpy.ndarray", "numpy.linalg.norm", "numpy.random.rand", "numpy.zeros" ] ]
artjoms-formulevics/portfolio-builder
[ "d9aa593d52594b795691f7893bd86740ff0eec84" ]
[ "portfolio-theory/updating_stock_prices.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 1 10:35:28 2020\n\n@author: afo\n\"\"\"\n\nfrom os import listdir\nfrom os.path import isfile, join, abspath\nimport os\nfrom inspect import getsourcefile\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport pandas_datareader as pdr\n\n# Function gathers latest ticker data for selected portfolios\ndef update_prices():\n \n p = abspath(getsourcefile(lambda:0))\n p = p.rsplit('/', 1)[0]\n os.chdir(p)\n print('Working Directory is: %s' % os.getcwd())\n \n file_path = p+ '/results_2015-2019/' # !!! Editable Folder with files with weights\n start_time = '2020-01-01' # !!! start date, editable\n end_time = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d') # last day = day before today\n \n # Lists of files with portfolio weights\n files = [f for f in listdir(file_path) if isfile(join(file_path, f))]\n files = [f for f in files if f.startswith('portfolio_weights')]\n files = [file_path+f for f in files]\n \n totals = []\n \n for i in range(0, len(files)):\n \n portfolio = pd.read_csv(files[i], index_col=0).iloc[:,0:5]\n \n tickers = portfolio.iloc[:,1].tolist() # tickers inside portfolio (they will be updated)\n \n # Getting stock data (maybe re-do to for loop, if there be problems with memory)\n temp = pdr.DataReader(tickers, 'yahoo', start_time, end_time)['Close']\n \n weights = portfolio['shares_bought'].to_frame().T\n weights.columns = tickers\n \n temp = temp.mul(weights.values) # recalculate each ticker according to weight in portfolio\n temp['total'] = temp.sum(axis=1)\n \n \n # Getting the S&P500 (benchmark)\n data = pdr.DataReader('^GSPC', 'yahoo', start_time, end_time)['Close']\n data = data.rename('SP500')\n data = data.to_frame()\n \n data = data.join(temp)\n del temp\n \n \n # Rearrange cols\n cols = data.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n data = data[cols]\n \n # Get paths & filenames for saving with replacing old csv files\n n = files[i].split('_')[-1]\n s = file_path + 'portfolio_ticker_' + n\n \n data.to_csv(s)\n \n # Create one total file with comparison of all of portfolios\n totals.append(data['total'].to_frame())\n totals[i] = totals[i].rename(columns={'total':'portfolio_'+n.split('.')[0]})\n \n \n total = pd.concat(totals, axis=1)\n \n total.to_excel(file_path+'resulting_portfolios.xlsx')\n \n \nupdate_prices()\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
mundanePeo/faceRecognition
[ "f3340343a8448372e1031b16ba3c8928419bb9e6" ]
[ "App/exts/face_sdk/api_usage/face_feature.py" ]
[ "\"\"\"\r\n@author: mjs\r\n@based: JiXuan Xu, Jun Wang\r\n\"\"\"\r\n\r\nimport yaml\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom .logFile import logger\r\nfrom ..core.model_loader.face_recognition.FaceRecModelLoader import FaceRecModelLoader\r\nfrom ..core.model_handler.face_recognition.FaceRecModelHandler import FaceRecModelHandler\r\n\r\nwith open('config/model_conf.yaml') as f:\r\n model_conf = yaml.load(f)\r\n \r\nif __name__ == '__main__':\r\n # common setting for all model, need not modify.\r\n model_path = 'models'\r\n\r\n # model setting, modified along with model\r\n scene = 'non-mask'\r\n model_category = 'face_recognition'\r\n model_name = model_conf[scene][model_category]\r\n\r\n logger.info('Start to load the face recognition model...')\r\n # load model\r\n try:\r\n faceRecModelLoader = FaceRecModelLoader(model_path, model_category, model_name)\r\n except Exception as e:\r\n logger.error('Failed to parse model configuration file!')\r\n logger.error(e)\r\n sys.exit(-1)\r\n else:\r\n logger.info('Successfully parsed the model configuration file model_meta.json!')\r\n \r\n try:\r\n model, cfg = faceRecModelLoader.load_model()\r\n except Exception as e:\r\n logger.error('Model loading failed!')\r\n logger.error(e)\r\n sys.exit(-1)\r\n else:\r\n logger.info('Successfully loaded the face recognition model!')\r\n\r\n # read image\r\n image_path = 'api_usage/test_images/test1_cropped.jpg'\r\n image = cv2.imread(image_path)\r\n faceRecModelHandler = FaceRecModelHandler(model, 'cuda:0', cfg)\r\n\r\n try:\r\n feature = faceRecModelHandler.inference_on_image(image)\r\n except Exception as e:\r\n logger.error('Failed to extract facial features!')\r\n logger.error(e)\r\n sys.exit(-1)\r\n else:\r\n logger.info('Successfully extracted facial features!')\r\n\r\n np.save('api_usage/temp/test1_feature.npy', feature)\r\n" ]
[ [ "numpy.save" ] ]
JackBenny39/mmabm
[ "e79d91232016167bff914495ee63e18063a1697b" ]
[ "tests/testLearningMM.py" ]
[ "import random\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom mmabm.shared import Side, OType\r\n\r\nfrom mmabm.learner import MarketMakerL\r\n\r\n\r\nclass TestTrader(unittest.TestCase):\r\n \r\n def setUp(self):\r\n self.l1 = self._makeMML(3001, 1)\r\n \r\n self.q1 = {'order_id': 1, 'timestamp': 1, 'type': OType.ADD, 'quantity': 1, 'side': Side.BID,\r\n 'price': 125}\r\n \r\n self.q1_buy = {'order_id': 1,'timestamp': 2, 'type': OType.ADD, 'quantity': 1, 'side': Side.BID,\r\n 'price': 50}\r\n self.q2_buy = {'order_id': 2, 'timestamp': 3, 'type': OType.ADD, 'quantity': 1, 'side': Side.BID,\r\n 'price': 50}\r\n self.q3_buy = {'order_id': 1, 'timestamp': 4, 'type': OType.ADD, 'quantity': 3, 'side': Side.BID,\r\n 'price': 49}\r\n self.q4_buy = {'order_id': 1, 'timestamp': 5, 'type': OType.ADD, 'quantity': 3, 'side': Side.BID,\r\n 'price': 47}\r\n self.q1_sell = {'order_id': 3, 'timestamp': 2, 'type': OType.ADD, 'quantity': 1, 'side': Side.ASK,\r\n 'price': 52}\r\n self.q2_sell = {'order_id': 4, 'timestamp': 3, 'type': OType.ADD, 'quantity': 1, 'side': Side.ASK,\r\n 'price': 52}\r\n self.q3_sell = {'order_id': 2, 'timestamp': 4, 'type': OType.ADD, 'quantity': 3, 'side': Side.ASK,\r\n 'price': 53}\r\n self.q4_sell = {'order_id': 2, 'timestamp': 5, 'type': OType.ADD, 'quantity': 3, 'side': Side.ASK,\r\n 'price': 55}\r\n \r\n def _makeMML(self, tid, arrInt):\r\n '''\r\n Two sets of market descriptors: arrival count and order imbalance (net signed order flow)\r\n arrival count: 16 bits, 8 for previous period and 8 for the previous 5 periods:\r\n previous period -> one bit each for > 0, 1, 2, 3, 4, 6, 8, 12\r\n previous 5 periods -> one bit each for > 0, 1, 2, 4, 8, 16, 32, 64\r\n order imbalance: 24 bits, 12 for previous period and 12 for previous 5 periods:\r\n previous period -> one bit each for < -8, -4, -3, -2, -1, 0 and > 0, 1, 2, 3, 4, 8\r\n previous 5 periods -> one bit each for < -16, -8, -6, -4, -2, 0 and > 0, 2, 4, 6, 8, 16\r\n \r\n The market maker has a set of predictors (condition/forecast rules) where the condition\r\n matches the market descriptors (i.e., the market state) and the forecasts are used as inputs\r\n to the market maker decision making.\r\n Each market condition is a bit string that coincides with market descriptors with the\r\n additional possibility of \"don't care\" (==2). \r\n Each market condition has an associated forecast\r\n arrival count: 5 bits -> 2^5 - 1 = 31 for a range of 0 - 31\r\n order imbalance: 6 bits -> lhs bit is +1/-1 and 2^5 - 1 = 31 for a range of -31 - +31\r\n \r\n Each market maker receives 100 genes for each of the two sets of market descriptors and\r\n 25 genes for the arrival forecast action rule.\r\n Examples:\r\n arrival count: 1111100011111100 -> >4 for previous period and >8 for previous 5 periods\r\n arrival count gene -> 2222102222221122: 01010 \r\n this gene matches on the \"do care\" (0 or 1) bits and has \"don't care\" for the remaining\r\n bits. 
It forecasts an arrival count of 10 (0*16 + 1*8 + 0*4 + 1*2 + 0*1).\r\n order imbalance: 011111000000011111000000 - < -4 for previous period and < -8 for previous\r\n 5 periods\r\n order imbalance gene: 222221022222222122222012: 010010\r\n this gene does not match the market state in position 23 and forecasts an order\r\n imbalance of +18 (+1*(1*16 + 0*8 + 0*4 + 1*2 + 0*1))\r\n \r\n The arrival count forecast acts as a condition/action rule where the condition matches the\r\n arrival count forecast and the action adjusts the bid and ask prices:\r\n arrival count forecast: 5 bits -> 2^5 - 1 = 31 for a range of 0 - 31\r\n action: 4 bits -> lhs bit is +1/-1 and 2^3 - 1 = 7 for a range of -7 - +7\r\n Example:\r\n arrival count forecast -> 01010\r\n arrival count gene -> 02210: 0010\r\n this gene matches the arrival count forecast and adjusts the bid (or ask) by (+1*(0*4 + 1*2 + 0*1) = +2.\r\n '''\r\n random.seed(39)\r\n np.random.seed(39)\r\n gene_n1 = 100\r\n gene_n2 = 25\r\n arr_cond_n = 16\r\n oi_cond_n = 24\r\n spr_cond_n = 5\r\n arr_fcst_n = 5\r\n oi_fcst_n = 6\r\n spr_adj_n = 4\r\n probs = [0.05, 0.05, 0.9]\r\n \r\n arr_genes = {'2' * arr_cond_n: '0' * arr_fcst_n}\r\n oi_genes = {'2' * oi_cond_n: '0' * oi_fcst_n}\r\n spread_genes = {'2' * spr_cond_n: '0' * spr_adj_n}\r\n genes = tuple([oi_genes, arr_genes, spread_genes])\r\n while len(arr_genes) < gene_n1:\r\n gk = ''.join(str(x) for x in np.random.choice(np.arange(0, 3), arr_cond_n, p=probs))\r\n gv = ''.join(str(x) for x in np.random.choice(np.arange(0, 2), arr_fcst_n))\r\n arr_genes.update({gk: gv})\r\n while len(oi_genes) < gene_n1:\r\n gk = ''.join(str(x) for x in np.random.choice(np.arange(0, 3), oi_cond_n, p=probs))\r\n gv = ''.join(str(x) for x in np.random.choice(np.arange(0, 2), oi_fcst_n))\r\n oi_genes.update({gk: gv})\r\n while len(spread_genes) < gene_n2:\r\n gk = ''.join(str(x) for x in np.random.choice(np.arange(0, 3), spr_cond_n, p=probs))\r\n gv = ''.join(str(x) for x in np.random.choice(np.arange(0, 2), spr_adj_n))\r\n spread_genes.update({gk: gv})\r\n maxq = 5\r\n a = b = 1\r\n c = -1\r\n keeper = 0.8\r\n mutate_pct = 0.03\r\n genetic_int = 250\r\n return MarketMakerL(tid, maxq, arrInt, a, b, c, genes, keeper, mutate_pct, genetic_int)\r\n \r\n ''' Strategy Construction Tests '''\r\n def test_make_oi_strat2(self):\r\n ''' The OI strat has 100 genes each with 24 bits; one strat is all 2 '''\r\n self.assertEqual(self.l1._oi_len, 24)\r\n self.assertEqual(len(self.l1._oi_strat), 100)\r\n self.assertTrue('2' * 24 in self.l1._oi_strat.keys())\r\n #print(self.l1._oi_strat, self.l1._oi_len, len(self.l1._arr_strat))\r\n \r\n def test_make_arr_strat2(self):\r\n ''' The arr strat has 100 genes each with 16 bits; one strat is all 2 '''\r\n self.assertEqual(self.l1._arr_len, 16)\r\n self.assertEqual(len(self.l1._arr_strat), 100)\r\n self.assertTrue('2' * 16 in self.l1._arr_strat.keys())\r\n #print(self.l1._arr_strat, self.l1._arr_len, len(self.l1._oi_strat))\r\n \r\n def test_make_spread_strat2(self):\r\n ''' The Spread Adj strat has 25 genes each with 5 bits; one strat is all 2 '''\r\n self.assertEqual(self.l1._spr_len, 5)\r\n self.assertEqual(len(self.l1._spradj_strat), 25)\r\n self.assertTrue('2' * 5 in self.l1._spradj_strat.keys())\r\n #print(self.l1._spradj_strat, self.l1._spr_len, len(self.l1._spradj_strat))\r\n \r\n @unittest.skip('Takes too long to run every time')\r\n def test_make_oi_strat(self):\r\n ''' Test for proper conversion from bitstring to integer '''\r\n for i in self.l1._oi_strat.keys():\r\n with 
self.subTest(i=i):\r\n self.assertEqual(int(self.l1._oi_strat[i]['action'][1:], 2), abs(self.l1._oi_strat[i]['strategy']))\r\n if self.l1._oi_strat[i]['strategy'] != 0:\r\n self.assertEqual(int(self.l1._oi_strat[i]['action'][0]), self.l1._oi_strat[i]['strategy']>0)\r\n self.assertEqual(self.l1._oi_strat[i]['accuracy'], [0, 0, 1000])\r\n \r\n @unittest.skip('Takes too long to run every time') \r\n def test_make_arr_strat(self):\r\n ''' Test for proper conversion from bitstring to integer '''\r\n for i in self.l1._arr_strat.keys():\r\n with self.subTest(i=i):\r\n self.assertEqual(int(self.l1._arr_strat[i]['action'], 2), self.l1._arr_strat[i]['strategy'])\r\n self.assertEqual(self.l1._arr_strat[i]['accuracy'], [0, 0, 1000])\r\n \r\n @unittest.skip('Takes too long to run every time') \r\n def test_make_spread_strat(self):\r\n ''' Test for proper conversion from bitstring to integer '''\r\n # spread strategy\r\n for i in self.l1._spradj_strat.keys():\r\n with self.subTest(i=i):\r\n self.assertEqual(int(self.l1._spradj_strat[i]['action'][1:], 2), abs(self.l1._spradj_strat[i]['strategy']))\r\n if self.l1._spradj_strat[i]['strategy'] != 0:\r\n self.assertEqual(int(self.l1._spradj_strat[i]['action'][0]), self.l1._spradj_strat[i]['strategy']>0)\r\n self.assertEqual(self.l1._spradj_strat[i]['rr_spread'], [0, 0, 0])\r\n \r\n ''' Strategy Matching Tests '''\r\n def test_match_oi_strat2(self):\r\n ''' With seeds == 39, '221212222222222222020222' is the sole winning strategy with a max strength == 4 '''\r\n #oi_state is 24 bits\r\n signal = '011111000000011111000000'\r\n self.l1._match_oi_strat2(signal)\r\n self.assertEqual(self.l1._current_oi_strat, '221212222222222222020222')\r\n self.assertTrue(all([(self.l1._current_oi_strat[x] == signal[x] or self.l1._current_oi_strat[x] == '2') for x in range(self.l1._oi_len)]))\r\n self.assertEqual(sum([self.l1._current_oi_strat[x] == signal[x] for x in range(self.l1._oi_len)]), 4)\r\n # Another winner with strength == 4 could be '212212222222222222020222'\r\n self.l1._oi_strat['212212222222222222020222'] = {'action': 'xxxxx', 'strategy': 999, 'accuracy': [0, 0, 1000]}\r\n # Set '221212222222222222020222' accuracy to less than new strat accuracy\r\n self.l1._oi_strat['221212222222222222020222']['accuracy'][-1] = 999\r\n self.l1._match_oi_strat2(signal)\r\n self.assertEqual(self.l1._current_oi_strat, '212212222222222222020222')\r\n self.assertTrue(all([(self.l1._current_oi_strat[x] == signal[x] or self.l1._current_oi_strat[x] == '2') for x in range(self.l1._oi_len)]))\r\n self.assertEqual(sum([self.l1._current_oi_strat[x] == signal[x] for x in range(self.l1._oi_len)]), 4)\r\n # If they had the same strength and accuracy, only one would be returned\r\n self.l1._oi_strat['221212222222222222020222']['accuracy'][-1] = 1000\r\n self.l1._match_oi_strat2(signal)\r\n self.assertEqual('221212222222222222020222', self.l1._current_oi_strat)\r\n\r\n def test_match_arr_strat2(self):\r\n ''' With seeds == 39, '1222102221222222' is the winning strategy with a max strength == 4 '''\r\n signal = '1111100011111100'\r\n self.l1._match_arr_strat2(signal)\r\n self.assertEqual(self.l1._current_arr_strat, '1222102221222222')\r\n self.assertTrue(all([(self.l1._current_arr_strat[x] == signal[x] or self.l1._current_arr_strat[x] == '2') for x in range(self.l1._arr_len)]))\r\n self.assertEqual(sum([self.l1._current_arr_strat[x] == signal[x] for x in range(self.l1._arr_len)]), 4)\r\n # Another winner with strength == 4 could be '2122102221222222'\r\n self.l1._arr_strat['2122102221222222'] = 
{'action': 'xxxxx', 'strategy': 999, 'accuracy': [0, 0, 1000]}\r\n # Set '1222102221222222' accuracy to less than new strat accuracy\r\n self.l1._arr_strat['1222102221222222']['accuracy'][-1] = 999\r\n self.l1._match_arr_strat2(signal)\r\n self.assertEqual(self.l1._current_arr_strat, '2122102221222222')\r\n self.assertTrue(all([(self.l1._current_arr_strat[x] == signal[x] or self.l1._current_arr_strat[x] == '2') for x in range(self.l1._arr_len)]))\r\n self.assertEqual(sum([self.l1._current_arr_strat[x] == signal[x] for x in range(self.l1._arr_len)]), 4)\r\n # If they had the same strength and accuracy, only one would be returned\r\n self.l1._arr_strat['1222102221222222']['accuracy'][-1] = 1000\r\n self.l1._match_arr_strat2(signal)\r\n self.assertEqual('1222102221222222', self.l1._current_arr_strat)\r\n\r\n def test_match_spread_strat(self):\r\n ''' With seeds == 39, ['21220', '22020', '02022', '21212', '22210', '02212'] are the winning strategies with a max strength == 2 '''\r\n signal = '01010'\r\n self.l1._match_spread_strat(signal)\r\n for j in ['21220', '22020', '02022', '21212', '22210', '02212']:\r\n self.assertTrue(j in self.l1._current_spradj_strat)\r\n for i in self.l1._current_spradj_strat:\r\n with self.subTest(i=i):\r\n self.assertTrue(all([(i[x] == signal[x] or i[x] == '2') for x in range(self.l1._spr_len)]))\r\n self.assertEqual(sum([i[x] == signal[x] for x in range(self.l1._spr_len)]), 2)\r\n # Winner '01222' - set rr_spread higher\r\n self.l1._spradj_strat['01222'] = {'action': 'xxxxx', 'strategy': 999, 'rr_spread': [0, 0, 1]}\r\n self.l1._match_spread_strat(signal)\r\n self.assertEqual(len(self.l1._current_spradj_strat), 1)\r\n self.assertEqual(self.l1._current_spradj_strat[0], '01222')\r\n self.assertTrue(all([(self.l1._current_spradj_strat[0][x] == signal[x] or self.l1._current_spradj_strat[0][x] == '2') for x in range(self.l1._spr_len)]))\r\n self.assertEqual(sum([self.l1._current_spradj_strat[0][x] == signal[x] for x in range(self.l1._spr_len)]), 2)\r\n \r\n ''' Accuracy/Profitability Update Tests '''\r\n def test_update_oi_acc(self):\r\n self.l1._oi_strat['221212222222222222020222']['accuracy'][0] = 10\r\n self.l1._oi_strat['221212222222222222020222']['accuracy'][1] = 10\r\n self.l1._oi_strat['221212222222222222020222']['accuracy'][-1] = 1000\r\n self.l1._oi_strat['221212222222222222020222']['strategy'] = 4\r\n self.l1._current_oi_strat = '221212222222222222020222'\r\n actual = 6\r\n self.l1._update_oi_acc(actual)\r\n self.assertListEqual(self.l1._oi_strat['221212222222222222020222']['accuracy'], [12, 11, 1000 - 12/11])\r\n\r\n def test_update_arr_acc(self):\r\n self.l1._arr_strat['1222102221222222']['accuracy'][0] = 10\r\n self.l1._arr_strat['1222102221222222']['accuracy'][1] = 10\r\n self.l1._arr_strat['1222102221222222']['accuracy'][-1] = 1000\r\n self.l1._arr_strat['1222102221222222']['strategy'] = 4\r\n self.l1._current_arr_strat = '1222102221222222'\r\n actual = 6\r\n self.l1._update_arr_acc(actual)\r\n self.assertListEqual(self.l1._arr_strat['1222102221222222']['accuracy'], [12, 11, 1000 - 12/11])\r\n\r\n def test_update_rspr(self):\r\n self.l1._spradj_strat['21220']['rr_spread'][0] = 10000\r\n self.l1._spradj_strat['21220']['rr_spread'][1] = 1000\r\n self.l1._spradj_strat['21220']['rr_spread'][-1] = 10\r\n self.l1._current_spradj_strat = ['21220']\r\n mid = 1000\r\n self.l1._last_buy_prices = [998, 999]\r\n self.l1._last_sell_prices = [1001, 1002]\r\n self.l1._update_rspr(mid)\r\n self.assertListEqual(self.l1._spradj_strat['21220']['rr_spread'], [10006, 1004, 
10006/1004])\r\n \r\n ''' Order Construction Tests ''' \r\n def test_make_add_quote(self):\r\n ''' Takes 4 inputs, increments the quote sequence and generates a dict '''\r\n time = 1\r\n side = Side.ASK\r\n price = 125\r\n quantity = 5\r\n self.assertFalse(self.l1._quote_sequence)\r\n expected = {'order_id': 1, 'trader_id': self.l1.trader_id, 'timestamp': 1, 'type': OType.ADD, \r\n 'quantity': quantity, 'side': Side.ASK, 'price': 125}\r\n self.assertDictEqual(self.l1._make_add_quote(time, side, price, quantity), expected)\r\n \r\n def test_make_cancel_quote(self):\r\n ''' Takes a quote and current timestamp, resets the timestamp to current,\r\n and updates type to CANCEL '''\r\n self.q1['trader_id'] = self.l1.trader_id\r\n expected = {'order_id': 1, 'trader_id': self.l1.trader_id, 'timestamp': 2, 'type': OType.CANCEL, \r\n 'quantity': 1, 'side': Side.BID, 'price': 125}\r\n self.assertDictEqual(self.l1._make_cancel_quote(self.q1, 2), expected)\r\n \r\n ''' Orderbook Bookkeeping Tests'''\r\n\r\n def test_add_order(self):\r\n '''\r\n add_order_to_book() impacts _bid_book and _bid_book_prices or _ask_book and _ask_book_prices\r\n Add two buy orders, then two sell orders\r\n '''\r\n # 2 buy orders\r\n self.assertFalse(self.l1._bid_book_prices)\r\n self.assertFalse(self.l1._bid_book)\r\n self.l1._add_order(self.q1_buy)\r\n self.assertTrue(50 in self.l1._bid_book_prices)\r\n self.assertTrue(50 in self.l1._bid_book.keys())\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 1)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 1)\r\n self.assertEqual(self.l1._bid_book[50]['order_ids'][0], 1)\r\n del self.q1_buy['type']\r\n self.assertDictEqual(self.l1._bid_book[50]['orders'][1], self.q1_buy)\r\n self.l1._add_order(self.q2_buy)\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 2)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 2)\r\n self.assertEqual(self.l1._bid_book[50]['order_ids'][1], 2)\r\n del self.q2_buy['type']\r\n self.assertDictEqual(self.l1._bid_book[50]['orders'][2], self.q2_buy)\r\n # 2 sell orders\r\n self.assertFalse(self.l1._ask_book_prices)\r\n self.assertFalse(self.l1._ask_book)\r\n self.l1._add_order(self.q1_sell)\r\n self.assertTrue(52 in self.l1._ask_book_prices)\r\n self.assertTrue(52 in self.l1._ask_book.keys())\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 1)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 1)\r\n self.assertEqual(self.l1._ask_book[52]['order_ids'][0], 3)\r\n del self.q1_sell['type']\r\n self.assertDictEqual(self.l1._ask_book[52]['orders'][3], self.q1_sell)\r\n self.l1._add_order(self.q2_sell)\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 2)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 2)\r\n self.assertEqual(self.l1._ask_book[52]['order_ids'][1], 4)\r\n del self.q2_sell['type']\r\n self.assertDictEqual(self.l1._ask_book[52]['orders'][4], self.q2_sell)\r\n\r\n def test_remove_order(self):\r\n '''\r\n _remove_order() impacts _bid_book and _bid_book_prices or _ask_book and _ask_book_prices\r\n Add two orders, remove the second order twice\r\n '''\r\n # buy orders\r\n self.l1._add_order(self.q1_buy)\r\n self.l1._add_order(self.q2_buy)\r\n self.assertTrue(50 in self.l1._bid_book_prices)\r\n self.assertTrue(50 in self.l1._bid_book.keys())\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 2)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 2)\r\n self.assertEqual(len(self.l1._bid_book[50]['order_ids']), 2)\r\n # remove first order\r\n self.l1._remove_order(Side.BID, 50, 1)\r\n 
self.assertEqual(self.l1._bid_book[50]['num_orders'], 1)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 1)\r\n self.assertEqual(len(self.l1._bid_book[50]['order_ids']), 1)\r\n self.assertFalse(1 in self.l1._bid_book[50]['orders'].keys())\r\n self.assertTrue(50 in self.l1._bid_book_prices)\r\n # remove second order\r\n self.l1._remove_order(Side.BID, 50, 2)\r\n self.assertFalse(self.l1._bid_book_prices)\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 0)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 0)\r\n self.assertEqual(len(self.l1._bid_book[50]['order_ids']), 0)\r\n self.assertFalse(2 in self.l1._bid_book[50]['orders'].keys())\r\n self.assertFalse(50 in self.l1._bid_book_prices)\r\n # remove second order again\r\n self.l1._remove_order(Side.BID, 50, 2)\r\n self.assertFalse(self.l1._bid_book_prices)\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 0)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 0)\r\n self.assertEqual(len(self.l1._bid_book[50]['order_ids']), 0)\r\n self.assertFalse(2 in self.l1._bid_book[50]['orders'].keys())\r\n # sell orders\r\n self.l1._add_order(self.q1_sell)\r\n self.l1._add_order(self.q2_sell)\r\n self.assertTrue(52 in self.l1._ask_book_prices)\r\n self.assertTrue(52 in self.l1._ask_book.keys())\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 2)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 2)\r\n self.assertEqual(len(self.l1._ask_book[52]['order_ids']), 2)\r\n # remove first order\r\n self.l1._remove_order(Side.ASK, 52, 3)\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 1)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 1)\r\n self.assertEqual(len(self.l1._ask_book[52]['order_ids']), 1)\r\n self.assertFalse(3 in self.l1._ask_book[52]['orders'].keys())\r\n self.assertTrue(52 in self.l1._ask_book_prices)\r\n # remove second order\r\n self.l1._remove_order(Side.ASK, 52, 4)\r\n self.assertFalse(self.l1._ask_book_prices)\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 0)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 0)\r\n self.assertEqual(len(self.l1._ask_book[52]['order_ids']), 0)\r\n self.assertFalse(4 in self.l1._ask_book[52]['orders'].keys())\r\n self.assertFalse(52 in self.l1._ask_book_prices)\r\n # remove second order again\r\n self.l1._remove_order(Side.ASK, 52, 4)\r\n self.assertFalse(self.l1._ask_book_prices)\r\n self.assertEqual(self.l1._ask_book[52]['num_orders'], 0)\r\n self.assertEqual(self.l1._ask_book[52]['size'], 0)\r\n self.assertEqual(len(self.l1._ask_book[52]['order_ids']), 0)\r\n self.assertFalse(4 in self.l1._ask_book[52]['orders'].keys())\r\n\r\n def test_modify_order(self):\r\n '''\r\n _modify_order() primarily impacts _bid_book or _ask_book \r\n _modify_order() could impact _bid_book_prices or _ask_book_prices if the order results \r\n in removing the full quantity with a call to _remove_order() \r\n Add 1 order, remove partial, then remainder\r\n '''\r\n # Buy order\r\n q1 = {'order_id': 1, 'timestamp': 5, 'type': OType.ADD, 'quantity': 2, 'side': Side.BID, 'price': 50}\r\n self.l1._add_order(q1)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 2)\r\n # remove 1\r\n self.l1._modify_order(Side.BID, 1, 1, 50)\r\n self.assertEqual(self.l1._bid_book[50]['size'], 1)\r\n self.assertEqual(self.l1._bid_book[50]['orders'][1]['quantity'], 1)\r\n self.assertTrue(self.l1._bid_book_prices)\r\n # remove remainder\r\n self.l1._modify_order(Side.BID, 1, 1, 50)\r\n self.assertFalse(self.l1._bid_book_prices)\r\n self.assertEqual(self.l1._bid_book[50]['num_orders'], 0)\r\n 
self.assertEqual(self.l1._bid_book[50]['size'], 0)\r\n self.assertFalse(1 in self.l1._bid_book[50]['orders'].keys())\r\n # Sell order\r\n q2 = {'order_id': 2, 'timestamp': 5, 'type': OType.ADD, 'quantity': 2, 'side': Side.ASK, 'price': 50}\r\n self.l1._add_order(q2)\r\n self.assertEqual(self.l1._ask_book[50]['size'], 2)\r\n # remove 1\r\n self.l1._modify_order(Side.ASK, 1, 2, 50)\r\n self.assertEqual(self.l1._ask_book[50]['size'], 1)\r\n self.assertEqual(self.l1._ask_book[50]['orders'][2]['quantity'], 1)\r\n self.assertTrue(self.l1._ask_book_prices)\r\n # remove remainder\r\n self.l1._modify_order(Side.ASK, 1, 2, 50)\r\n self.assertFalse(self.l1._ask_book_prices)\r\n self.assertEqual(self.l1._ask_book[50]['num_orders'], 0)\r\n self.assertEqual(self.l1._ask_book[50]['size'], 0)\r\n self.assertFalse(2 in self.l1._ask_book[50]['orders'].keys())\r\n \r\n ''' Trade Handling Tests '''\r\n def test_confirm_trade_local(self):\r\n # _cash_flow and _delta_inv start at 0, _last_buy_prices and last_sell_prices are empty\r\n self.assertFalse(self.l1._last_buy_prices)\r\n self.assertFalse(self.l1._last_sell_prices)\r\n self.assertEqual(self.l1._cash_flow, 0)\r\n self.assertEqual(self.l1._delta_inv, 0)\r\n # add some orders\r\n q1 = {'order_id': 1, 'timestamp': 5, 'type': OType.ADD, 'quantity': 5, 'side': Side.BID, 'price': 995}\r\n q2 = {'order_id': 2, 'timestamp': 5, 'type': OType.ADD, 'quantity': 5, 'side': Side.ASK, 'price': 1005}\r\n self.l1._add_order(q1)\r\n self.l1._add_order(q2)\r\n # Market maker buys\r\n confirm1 = {'timestamp': 20, 'trader': 3001, 'order_id': 1, 'quantity': 1, 'side': Side.BID, 'price': 995}\r\n self.l1.confirm_trade_local(confirm1)\r\n self.assertListEqual(self.l1._last_buy_prices, [995])\r\n self.assertEqual(self.l1._cash_flow, -995/100000)\r\n self.assertEqual(self.l1._delta_inv, 1)\r\n self.assertEqual(self.l1._bid_book[995]['num_orders'], 1)\r\n self.assertEqual(self.l1._bid_book[995]['size'], 4)\r\n confirm2 = {'timestamp': 22, 'trader': 3001, 'order_id': 1, 'quantity': 4, 'side': Side.BID, 'price': 995}\r\n self.l1.confirm_trade_local(confirm2)\r\n self.assertListEqual(self.l1._last_buy_prices, [995, 995])\r\n self.assertEqual(self.l1._cash_flow, -4975/100000)\r\n self.assertEqual(self.l1._delta_inv, 5)\r\n self.assertFalse(self.l1._bid_book_prices)\r\n # Market maker sells\r\n confirm3 = {'timestamp': 20, 'trader': 3001, 'order_id': 2, 'quantity': 1, 'side': Side.ASK, 'price': 1005}\r\n self.l1.confirm_trade_local(confirm3)\r\n self.assertListEqual(self.l1._last_sell_prices, [1005])\r\n self.assertEqual(self.l1._cash_flow, -3970/100000)\r\n self.assertEqual(self.l1._delta_inv, 4)\r\n self.assertEqual(self.l1._ask_book[1005]['num_orders'], 1)\r\n self.assertEqual(self.l1._ask_book[1005]['size'], 4)\r\n confirm4 = {'timestamp': 22, 'trader': 3001, 'order_id': 2, 'quantity': 4, 'side': Side.ASK, 'price': 1005}\r\n self.l1.confirm_trade_local(confirm4)\r\n self.assertListEqual(self.l1._last_sell_prices, [1005, 1005])\r\n self.assertAlmostEqual(self.l1._cash_flow, 50/100000, 4)\r\n self.assertEqual(self.l1._delta_inv, 0)\r\n self.assertFalse(self.l1._ask_book_prices)\r\n\r\n ''' Orderbook Update Tests ''' \r\n def test_update_midpoint(self):\r\n ''' With seeds == 39, '221212222222222222020222' is the sole winning strategy with a max strength == 4 -> action == -3 '''\r\n self.l1._mid = 990\r\n self.l1._delta_inv = 3\r\n #oi_state is 24 bits\r\n oib_signal = '011111000000011111000000'\r\n mid_signal = 1000\r\n self.l1._update_midpoint(oib_signal, mid_signal)\r\n # new mid = 
mid_signal - 3 + (-1*3)\r\n self.assertEqual(self.l1._mid, 994)\r\n\r\n def test_make_spread(self):\r\n ''' With seeds == 39, '1222102221222222' is the winning arr strategy with a max strength == 4 -> action == '01000' (8)\r\n _match_spread_strat('01000') returns ['21220', '22020', '02022'] with an actions of ['0000', '1100', '1111'] -> \r\n strategy == [0, 4, 7] for an average of 3.67\r\n '''\r\n self.assertFalse(self.l1._ask)\r\n self.assertFalse(self.l1._bid)\r\n self.l1._mid = 1000\r\n arr_signal = '1222102221222222'\r\n vol = 4\r\n self.l1._make_spread(arr_signal, vol)\r\n # spradj = 3.67\r\n # ask = 1000 + round(max(1*4, 1) + 3.67/2) = 1006\r\n # bid = 1000 - round(max(1*4, 1) + 3.67/2) = 994\r\n self.assertEqual(self.l1._bid, 994)\r\n self.assertEqual(self.l1._ask, 1006)\r\n \r\n def test_process_cancels(self):\r\n ''' If desired ask > current best ask, cancel current ask orders with prices < new best ask '''\r\n # Create asks from 1005 - 1035\r\n for p in range(1005, 1036):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.ASK, p, self.l1._maxq))\r\n for p in range(1005, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n # Create bids from 960 - 990\r\n for p in range(960, 991):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.BID, p, self.l1._maxq))\r\n for p in range(960, 991):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n # case 1a: new ask = 1000, new bid = 995 -> no new cancels\r\n self.l1._ask = 1000\r\n self.l1._bid = 995\r\n self.l1._process_cancels(6)\r\n self.assertFalse(self.l1.cancel_collector)\r\n # case 2a: new ask = 1008 -> cancel 3 prices: 1005, 1006, 1007\r\n self.l1._ask = 1008\r\n self.l1._bid = 995\r\n self.l1._process_cancels(7)\r\n for p in range(1008, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n for p in range(1005, 1008):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._ask_book_prices)\r\n self.assertEqual(len(self.l1.cancel_collector), 3)\r\n # case 2b: new bid = 987 -> cancel 988, 989, 990\r\n self.l1._ask = 1000\r\n self.l1._bid = 987\r\n self.l1._process_cancels(8)\r\n for p in range(960, 988):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n for p in range(988, 991):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._bid_book_prices)\r\n self.assertEqual(len(self.l1.cancel_collector), 3)\r\n\r\n ''' Several cases:\r\n 1. _ask > prevailing worst ask (ask book empty due to canceling first)\r\n 2. _ask > prevailing best bid: add new ask orders from _ask and up\r\n 3. _ask <= prevailing best bid: add new ask orders from prevailing best bid+1 and up\r\n 4. _ask == current best ask: check for max size and add size if necessary\r\n Also, price range should always be between best ask + 20 and best ask + 60\r\n '''\r\n def test_update_ask_book1(self):\r\n ''' 1. _ask > prevailing worst ask (ask book empty due to canceling first) '''\r\n self.assertFalse(self.l1._ask_book_prices)\r\n self.l1._ask = 1000\r\n tob_bid = 995\r\n self.l1._update_ask_book(6, tob_bid)\r\n # case 1: Add orders from 1000 -> 1039\r\n for p in range(1000, 1040):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 40)\r\n\r\n def test_update_ask_book2(self):\r\n ''' 2. 
_ask > prevailing best bid: add new ask orders from _ask and up '''\r\n # Create asks from 1005 - 1035\r\n for p in range(1005, 1036):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.ASK, p, self.l1._maxq))\r\n for p in range(1005, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n # case 2: _ask = 1000, best_bid = 995 -> add 5 new prices: 1000 - 1004\r\n self.l1._ask = 1000\r\n tob_bid = 995\r\n self.l1._update_ask_book(6, tob_bid)\r\n for p in range(1000, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 5)\r\n \r\n def test_update_ask_book3(self):\r\n ''' 3. _ask <= prevailing best bid: add new ask orders from prevailing best bid+1 and up '''\r\n for p in range(1000, 1036):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.ASK, p, self.l1._maxq))\r\n for p in range(1000, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n # case 3: _ask = 990 but best_bid = 995 -> add 4 new prices: 996 - 999\r\n self.l1._ask = 990\r\n tob_bid = 995\r\n self.l1._update_ask_book(7, tob_bid)\r\n for p in range(996, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n for p in range(990, 996):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._ask_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 4)\r\n \r\n def test_update_ask_book4(self):\r\n ''' 4. _ask == current best ask: check for max size and add size if necessary '''\r\n for p in range(996, 1036):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.ASK, p, self.l1._maxq))\r\n for p in range(996, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n # case 4: new ask size == 2 -> replenish size to 5 with new add order\r\n self.l1._ask = 996\r\n tob_bid = 990\r\n self.l1._modify_order(Side.ASK, 3, 1, 996)\r\n self.assertEqual(self.l1._ask_book[996]['orders'][1]['quantity'], 2)\r\n self.assertEqual(self.l1._ask_book[996]['size'], 2)\r\n self.assertEqual(self.l1._ask_book[996]['num_orders'], 1)\r\n self.l1.quote_collector.clear() # happens in process_order\r\n self.l1._update_ask_book(8, tob_bid)\r\n self.assertEqual(self.l1._ask_book[996]['size'], 5)\r\n self.assertEqual(self.l1._ask_book[996]['num_orders'], 2)\r\n self.assertEqual(len(self.l1.quote_collector), 1)\r\n \r\n def test_update_ask_book5(self):\r\n ''' Also, price range should always be between best ask + 20 and best ask + 60 '''\r\n # make best ask == 1020 -> add orders to the other end of the book to make 40 prices\r\n for p in range(1020, 1036):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.ASK, p, self.l1._maxq))\r\n for p in range(1020, 1036):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n tob_bid = 990\r\n self.l1._ask = 1020\r\n self.l1._update_ask_book(10, tob_bid)\r\n for p in range(1020, 1059):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n # make best ask == 980 -> cancel orders on the other end of the book to make 40 prices\r\n self.l1._bid = 975\r\n self.l1._ask = 980\r\n tob_bid = 975\r\n self.l1._update_ask_book(10, tob_bid)\r\n for p in range(980, 1019):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n for p in range(1020, 1059):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._ask_book_prices)\r\n \r\n ''' Several cases:\r\n 1. 
_bid < prevailing worst bid (bid book empty due to canceling first)\r\n 2. _bid < prevailing best ask: add new bid orders from _bid and down\r\n 3. _bid >= prevailing best ask: add new bid orders from prevailing best ask-1 and down\r\n 4. _bid == current best bid: check for max size and add size if necessary\r\n Also, price range should always be between best bid - 20 and best bid - 60\r\n '''\r\n def test_update_bid_book1(self):\r\n ''' 1. _bid < prevailing worst bid (bid book empty due to canceling first) '''\r\n self.assertFalse(self.l1._bid_book_prices)\r\n self.l1._bid = 995\r\n tob_ask = 1000\r\n self.l1._update_bid_book(6, tob_ask)\r\n # case 1: Add orders from 956 -> 995\r\n for p in range(956, 996):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 40)\r\n\r\n def test_update_bid_book2(self):\r\n ''' 2. _bid < prevailing best ask: add new bid orders from _bid and down '''\r\n # Create bids from 960 - 990\r\n for p in range(960, 991):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.BID, p, self.l1._maxq))\r\n for p in range(960, 991):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n # case 2: _bid = 995, best_ask = 1000 -> add 5 new prices: 991 - 995\r\n self.l1._bid = 995\r\n tob_ask = 1000\r\n self.l1._update_bid_book(6, tob_ask)\r\n for p in range(960, 996):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 5)\r\n \r\n def test_update_bid_book3(self):\r\n ''' _bid >= prevailing best ask: add new bid orders from prevailing best ask-1 and down '''\r\n # Create bids from 960 - 995\r\n for p in range(960, 996):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.BID, p, self.l1._maxq))\r\n for p in range(960, 996):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n # case 2: _bid = 1000, but tob_ask = 988 -> add 2 prices: 996, 997\r\n self.l1._bid = 1000\r\n tob_ask = 998\r\n self.l1._update_bid_book(7, tob_ask)\r\n for p in range(960, 998):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n for p in range(998, 1000):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._bid_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 2)\r\n \r\n def test_update_bid_book4(self):\r\n ''' 4. 
_bid == current best bid: check for max size and add size if necessary '''\r\n # case 4: new bid size == 2 -> replenish size to 5 with new add order\r\n # Create bids from 960 - 995\r\n for p in range(960, 996):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.BID, p, self.l1._maxq))\r\n for p in range(960, 996):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n self.l1._bid = 995\r\n tob_ask = 1000\r\n self.l1._modify_order(Side.BID, 3, 36, 995)\r\n self.assertEqual(self.l1._bid_book[995]['orders'][36]['quantity'], 2)\r\n self.assertEqual(self.l1._bid_book[995]['size'], 2)\r\n self.assertEqual(self.l1._bid_book[995]['num_orders'], 1)\r\n self.l1.quote_collector.clear() # happens in process_order\r\n self.l1._update_bid_book(8, tob_ask)\r\n self.assertEqual(self.l1._bid_book[995]['size'], 5)\r\n self.assertEqual(self.l1._bid_book[995]['num_orders'], 2)\r\n self.assertEqual(len(self.l1.quote_collector), 1)\r\n \r\n def test_update_bid_book5(self):\r\n ''' Also, price range should always be between best bid - 20 and best bid - 60 '''\r\n # make best bid == 975 -> add orders to the other end of the book to make 40 prices\r\n for p in range(960, 976):\r\n self.l1._add_order(self.l1._make_add_quote(35, Side.BID, p, self.l1._maxq))\r\n for p in range(960, 976):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n self.l1._bid = 975\r\n tob_ask = 1040\r\n self.l1._update_bid_book(9, tob_ask)\r\n for p in range(936, 975):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n # make best bid == 1030 -> cancel orders on the other end of the book to make 40 prices\r\n self.l1._bid = 1030\r\n tob_ask = 1040\r\n self.l1._update_bid_book(10, tob_ask)\r\n for p in range(991, 1031):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n for p in range(961, 991):\r\n with self.subTest(p=p):\r\n self.assertFalse(p in self.l1._bid_book_prices)\r\n \r\n def test_seed_book(self):\r\n ask = 998\r\n bid = 990\r\n step = 20\r\n self.l1.seed_book(step, ask, bid)\r\n self.assertEqual(self.l1._mid, 994)\r\n self.assertTrue(990 in self.l1._bid_book_prices)\r\n self.assertTrue(998 in self.l1._ask_book_prices)\r\n self.assertEqual(self.l1._bid, 990)\r\n self.assertEqual(self.l1._ask, 998)\r\n self.assertEqual(len(self.l1.quote_collector), 2)\r\n\r\n def test_process_signals(self):\r\n ''' Test process_signal1 and process_signal2 '''\r\n signal = {'oibv': 6, 'arrv': 8, 'mid': 1000, 'oib': '011111000000011111000000',\r\n 'arr': '1222102221222222', 'vol': 4}\r\n \r\n self.l1._current_oi_strat = '222222222222222222222222'\r\n self.l1._current_arr_strat = '2222222222222222'\r\n self.l1._current_spradj_strat = ['21220']\r\n self.l1._last_buy_prices = [998, 999]\r\n self.l1._last_sell_prices = [1001, 1002]\r\n self.l1._delta_inv = 3\r\n \r\n ask = 1015 # stub quotes?\r\n bid = 985 # stub quotes?\r\n step = 20\r\n \r\n self.l1.seed_book(step, ask, bid)\r\n self.assertEqual(self.l1._bid, 985)\r\n self.assertEqual(self.l1._ask, 1015)\r\n self.l1.process_signal1(44, signal)\r\n \r\n # Step 1: update scores for predictors:\r\n self.assertListEqual(self.l1._oi_strat['222222222222222222222222']['accuracy'], [1, 1, 999.0])\r\n self.assertListEqual(self.l1._arr_strat['2222222222222222']['accuracy'], [22, 1, 978.0])\r\n self.assertListEqual(self.l1._spradj_strat['21220']['rr_spread'], [6, 4, 1.5])\r\n # Step 2: update the midpoint:\r\n self.assertEqual(self.l1._mid, 994)\r\n # Step 3: update spread: using 
updated spradj_strat = '21220', adjustment == 0 -> bid == 990, ask == 998\r\n self.assertEqual(self.l1._bid, 990)\r\n self.assertEqual(self.l1._ask, 998)\r\n # Step 4: process cancels: there aren't any\r\n # Step 5: update the book\r\n tob_bid = 988\r\n tob_ask = 1000\r\n self.l1.process_signal2(step, tob_bid, tob_ask)\r\n for p in range(951, 991):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._bid_book_prices)\r\n for p in range(998, 1038):\r\n with self.subTest(p=p):\r\n self.assertTrue(p in self.l1._ask_book_prices)\r\n self.assertEqual(len(self.l1.quote_collector), 78)\r\n # Step 6: update cash flow collector, reset inventory, clear recent prices\r\n self.assertDictEqual(self.l1.cash_flow_collector[-1], {'mmid': 3001, 'timestamp': 20, 'cash_flow': 0, 'delta_inv': 3})\r\n self.assertFalse(self.l1._delta_inv)\r\n self.assertFalse(self.l1._last_buy_prices)\r\n self.assertFalse(self.l1._last_sell_prices)\r\n @unittest.skip('for now')\r\n def test_find_winners_oi(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.l1._find_winners()\r\n oi_accs = [v['accuracy'][2] for v in self.l1._oi_strat.values()]\r\n for j in range(921, 1001):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in oi_accs)\r\n self.assertEqual(min(oi_accs), 921)\r\n self.assertEqual(max(oi_accs), 1000)\r\n self.l2 = self._makeMML(3002, 1)\r\n for j, k in enumerate(self.l2._oi_strat.keys()):\r\n self.l2._oi_strat[k]['accuracy'][2] = 1000 - j\r\n if j % 5 == 0:\r\n self.l2._oi_strat[k]['accuracy'][1] = j\r\n self.l2._find_winners()\r\n oi_accs = [v['accuracy'] for v in self.l2._oi_strat.values()]\r\n for j in range(0, 100, 5):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in [a[1] for a in oi_accs])\r\n\r\n def test_get_winners_oi(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.l1._get_winners()\r\n oi_accs = [v['accuracy'][2] for v in self.l1._oi_strat.values()]\r\n for j in range(921, 1001):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in oi_accs)\r\n self.assertEqual(min(oi_accs), 921)\r\n self.assertEqual(max(oi_accs), 1000)\r\n self.l2 = self._makeMML(3002, 1)\r\n for j, k in enumerate(self.l2._oi_strat.keys()):\r\n self.l2._oi_strat[k]['accuracy'][2] = 1000 - j\r\n if j % 5 == 0:\r\n self.l2._oi_strat[k]['accuracy'][1] = j\r\n self.l2._get_winners()\r\n oi_accs = [v['accuracy'] for v in self.l2._oi_strat.values()]\r\n for j in range(0, 100, 5):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in [a[1] for a in oi_accs])\r\n @unittest.skip('for now') \r\n def test_find_winners_arr(self):\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n self.l1._find_winners()\r\n arr_accs = [v['accuracy'][2] for v in self.l1._arr_strat.values()]\r\n for j in range(921, 1001):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in arr_accs)\r\n self.assertEqual(min(arr_accs), 921)\r\n self.assertEqual(max(arr_accs), 1000)\r\n \r\n self.l2 = self._makeMML(3002, 1)\r\n for j, k in enumerate(self.l2._arr_strat.keys()):\r\n self.l2._arr_strat[k]['accuracy'][2] = 1000 - j\r\n if j % 5 == 0:\r\n self.l2._arr_strat[k]['accuracy'][1] = j\r\n self.l2._find_winners()\r\n arr_accs = [v['accuracy'] for v in self.l2._arr_strat.values()]\r\n for j in range(0, 100, 5):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in [a[1] for a in arr_accs])\r\n @unittest.skip('for now')\r\n def test_find_winners_spr(self):\r\n for j, k in 
enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.l1._find_winners()\r\n spr_rr = [kv[1]['rr_spread'][2] for kv in self.l1._spradj_strat.items()]\r\n for j in range(6, 25):\r\n with self.subTest(j=j):\r\n self.assertTrue(j in spr_rr)\r\n self.assertTrue(0 in spr_rr)\r\n self.assertEqual(min(spr_rr), 0)\r\n self.assertEqual(max(spr_rr), 24)\r\n self.assertTrue('222222222222222222222222' in self.l1._oi_strat.keys())\r\n self.assertTrue('2222222222222222' in self.l1._arr_strat.keys())\r\n self.assertTrue('22222' in self.l1._spradj_strat.keys())\r\n @unittest.skip('Method Not Used Directly')\r\n def test_uniform_selection(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n for j, k in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.l1._find_winners()\r\n self.l1._uniform_selection()\r\n @unittest.skip('Method Not Used Directly')\r\n def test_weighted_selection(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][2] = -j\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][2] = -j\r\n for j, k in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.l1._find_winners()\r\n self.l1._weighted_selection()\r\n \r\n ''' Test before and after length of strategy dict\r\n The genetic manipulations are straightforward but run inline, making\r\n the length of the new strategy dict the only testable outcome\r\n '''\r\n def test_oi_genes_us(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][0] = j\r\n self.l1._oi_strat[k]['accuracy'][1] = 1\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_keep)\r\n self.l1._oi_genes_us()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n #for k in self.l1._oi_strat.keys():\r\n #if self.l1._oi_strat[k]['accuracy'][1] != 1:\r\n #print(self.l1._oi_strat[k])\r\n \r\n def test_arr_genes_us(self):\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][0] = j\r\n self.l1._arr_strat[k]['accuracy'][1] = 1\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_keep)\r\n self.l1._arr_genes_us()\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n #for k in self.l1._arr_strat.keys():\r\n #if self.l1._arr_strat[k]['accuracy'][1] != 1:\r\n #print(self.l1._arr_strat[k])\r\n \r\n def test_spr_genes_us(self):\r\n for j, k in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][0] = j\r\n self.l1._spradj_strat[k]['rr_spread'][1] = 1\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spradj_keep)\r\n self.l1._spr_genes_us()\r\n self.l1._get_winners()\r\n self.l1._spr_genes_us()\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n #for k in self.l1._spradj_strat.keys():\r\n #if 
self.l1._spradj_strat[k]['rr_spread'][1] != 1:\r\n #print(self.l1._spradj_strat[k])\r\n\r\n def test_new_genes_u(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][0] = j\r\n self.l1._oi_strat[k]['accuracy'][1] = 1\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][0] = j\r\n self.l1._arr_strat[k]['accuracy'][1] = 1\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n for j, k in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][0] = j\r\n self.l1._spradj_strat[k]['rr_spread'][1] = 1\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n self.l1._genetics_us()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n \r\n def test_oi_genes_ws(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][0] = j\r\n self.l1._oi_strat[k]['accuracy'][1] = 1\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_keep)\r\n self.l1._oi_genes_ws()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n #for k in self.l1._oi_strat.keys():\r\n #if self.l1._oi_strat[k]['accuracy'][1] != 1:\r\n #print(self.l1._oi_strat[k])\r\n \r\n def test_arr_genes_ws(self):\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][0] = j\r\n self.l1._arr_strat[k]['accuracy'][1] = 1\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_keep)\r\n self.l1._arr_genes_ws()\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n #for k in self.l1._arr_strat.keys():\r\n #if self.l1._arr_strat[k]['accuracy'][1] != 1:\r\n #print(self.l1._arr_strat[k])\r\n\r\n def test_spr_genes_ws(self):\r\n for j, k in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][0] = j\r\n self.l1._spradj_strat[k]['rr_spread'][1] = 1\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n self.l1._get_winners()\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spradj_keep)\r\n self.l1._spr_genes_ws()\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n #for k in self.l1._spradj_strat.keys():\r\n #if self.l1._spradj_strat[k]['rr_spread'][1] != 1:\r\n #print(self.l1._spradj_strat[k])\r\n\r\n def test_new_genes_w(self):\r\n for j, k in enumerate(self.l1._oi_strat.keys()):\r\n self.l1._oi_strat[k]['accuracy'][0] = j\r\n self.l1._oi_strat[k]['accuracy'][1] = 1\r\n self.l1._oi_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n for j, k in enumerate(self.l1._arr_strat.keys()):\r\n self.l1._arr_strat[k]['accuracy'][0] = j\r\n self.l1._arr_strat[k]['accuracy'][1] = 1\r\n self.l1._arr_strat[k]['accuracy'][2] = 1000 - j\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n for j, k 
in enumerate(self.l1._spradj_strat.keys()):\r\n self.l1._spradj_strat[k]['rr_spread'][0] = j\r\n self.l1._spradj_strat[k]['rr_spread'][1] = 1\r\n self.l1._spradj_strat[k]['rr_spread'][2] = j\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)\r\n self.l1._genetics_ws()\r\n self.assertEqual(len(self.l1._oi_strat), self.l1._oi_ngene)\r\n self.assertEqual(len(self.l1._arr_strat), self.l1._arr_ngene)\r\n self.assertEqual(len(self.l1._spradj_strat), self.l1._spr_ngene)" ]
[ [ "numpy.arange", "numpy.random.seed" ] ]
Mateo-Lopez-Espejo/context_probe_analysis
[ "55461057fd01f00124aa46682b335313af9cc0f8" ]
[ "scripts/4_sam_data/200221_NTI_CPN_comparison.py" ]
[ "import itertools as itt\r\nimport pathlib as pl\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.io import loadmat\r\n\r\nimport src.data.rasters\r\nfrom src.data import dPCA as cdPCA\r\nfrom src.metrics import dprime as cDP\r\nfrom src.data.load import load, get_site_ids\r\nfrom src.data.cache import make_cache, get_cache\r\nfrom src.metrics.reliability import signal_reliability\r\nfrom src.utils.tools import shuffle_along_axis as shuffle\r\nfrom src.utils import fits as fit\r\nfrom src.visualization import fancy_plots as fplt\r\n\r\n'''\r\nsince applying the dprime CPN analysis toe the NTI data was unsuccessfull, the next alternative to compare Sam and my\r\napproach is to perform the CPN and NTI analysis to their respective datasets on recording sites that have both data\r\n'''\r\n\r\n\r\n# 1. list sites with both datasets\r\n# list all NTI sites this have to be done manually\r\n# list all CPN sites, this should be trivial\r\n# check the intersection\r\n\r\n# 2. Calculates the dPrime for each site and all possible probes, context pairs and cells (?). This is the difficult part\r\n# to summarize the outcome of all the\r\n\r\n\r\ndef cell_dprime(site, probe, meta):\r\n # recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)\r\n recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)\r\n if len(recs) > 2:\r\n print(f'\\n\\n{recs.keys()}\\n\\n')\r\n\r\n rec = recs['trip0']\r\n sig = rec['resp']\r\n\r\n # calculates response realiability and select only good cells to improve analysis\r\n r_vals, goodcells = signal_reliability(sig, r'\\ASTIM_*', threshold=meta['reliability'])\r\n goodcells = goodcells.tolist()\r\n\r\n # get the full data raster Context x Probe x Rep x Neuron x Time\r\n raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],\r\n smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],\r\n zscore=meta['zscore'], part='probe')\r\n\r\n # trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time\r\n trialR, R, _ = cdPCA.format_raster(raster)\r\n trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe\r\n\r\n rep, chn, ctx, tme = trialR.shape\r\n\r\n trans_pairs = [f'{x}_{y}' for x, y in itt.combinations(meta['transitions'], 2)]\r\n\r\n dprime = cDP.pairwise_dprimes(trialR, observation_axis=0, condition_axis=2) # shape CellPair x Cell x Time\r\n\r\n # Shuffles the rasters n times and organizes in an array with the same shape the raster plus one dimension\r\n # with size n containing each shuffle\r\n\r\n shuffled = list()\r\n # pbar = ProgressBar()\r\n print(f\"\\nshuffling {meta['montecarlo']} times\")\r\n for tp in trans_pairs:\r\n shuf_trialR = np.empty([meta['montecarlo'], rep, chn, 2, tme])\r\n shuf_trialR[:] = np.nan\r\n\r\n tran_idx = np.array([meta['transitions'].index(t) for t in tp.split('_')])\r\n ctx_shuffle = trialR[:, :, tran_idx, :].copy()\r\n\r\n for rr in range(meta['montecarlo']):\r\n shuf_trialR[rr, ...] 
= shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)\r\n\r\n shuffled.append(cDP.pairwise_dprimes(shuf_trialR, observation_axis=1, condition_axis=3))\r\n\r\n shuffled = np.stack(shuffled, axis=1).squeeze(axis=0).swapaxes(0, 1) # shape Montecarlo x ContextPair x Cell x Time\r\n\r\n return dprime, shuffled, goodcells, trans_pairs\r\n\r\n\r\ndef dPCA_fourway_analysis(site, probe, meta):\r\n # recs = load(site, remote=True, rasterfs=meta['raster_fs'], recache=False)\r\n recs = load(site, rasterfs=meta['raster_fs'], recache=rec_recache)\r\n\r\n if len(recs) > 2:\r\n print(f'\\n\\n{recs.keys()}\\n\\n')\r\n\r\n rec = recs['trip0']\r\n sig = rec['resp']\r\n\r\n # calculates response realiability and select only good cells to improve analysis\r\n r_vals, goodcells = signal_reliability(sig, r'\\ASTIM_*', threshold=meta['reliability'])\r\n goodcells = goodcells.tolist()\r\n\r\n # get the full data raster Context x Probe x Rep x Neuron x Time\r\n raster = src.data.rasters.raster_from_sig(sig, probe, channels=goodcells, contexts=meta['transitions'],\r\n smooth_window=meta['smoothing_window'], raster_fs=meta['raster_fs'],\r\n zscore=meta['zscore'])\r\n\r\n # trialR shape: Trial x Cell x Context x Probe x Time; R shape: Cell x Context x Probe x Time\r\n trialR, R, _ = cdPCA.format_raster(raster)\r\n trialR, R = trialR.squeeze(axis=3), R.squeeze(axis=2) # squeezes out probe\r\n Re, C, S, T = trialR.shape\r\n\r\n # calculates full dPCA. i.e. considering all 4 categories\r\n dPCA_projection, dPCA_transformation = cdPCA.fit_transform(R, trialR)\r\n dprime = cDP.pairwise_dprimes(dPCA_projection, observation_axis=0, condition_axis=1)\r\n\r\n # calculates floor (ctx shuffle) and ceiling (simulated data)\r\n sim_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))\r\n shuf_dprime = np.empty([meta['montecarlo']] + list(dprime.shape))\r\n\r\n ctx_shuffle = trialR.copy()\r\n # pbar = ProgressBar()\r\n for rr in range(meta['montecarlo']):\r\n # ceiling: simulates data, calculates dprimes\r\n sim_trial = np.random.normal(np.mean(trialR, axis=0), np.std(trialR, axis=0),\r\n size=[Re, C, S, T])\r\n sim_projection = cdPCA.transform(sim_trial, dPCA_transformation)\r\n sim_dprime[rr, ...] = cDP.pairwise_dprimes(sim_projection, observation_axis=0, condition_axis=1)\r\n\r\n ctx_shuffle = shuffle(ctx_shuffle, shuffle_axis=2, indie_axis=0)\r\n shuf_projection = cdPCA.transform(ctx_shuffle, dPCA_transformation)\r\n shuf_dprime[rr, ...] = cDP.pairwise_dprimes(shuf_projection, observation_axis=0, condition_axis=1)\r\n\r\n return dprime, shuf_dprime, sim_dprime, goodcells\r\n\r\n# transferable plotting parameters\r\nplt.rcParams['svg.fonttype'] = 'none'\r\nsup_title_size = 30\r\nsub_title_size = 20\r\nax_lab_size = 15\r\nax_val_size = 11\r\n\r\nmeta = {'reliability': 0.1, # r value\r\n 'smoothing_window': 0, # ms\r\n 'raster_fs': 30,\r\n 'transitions': ['silence', 'continuous', 'similar', 'sharp'],\r\n 'montecarlo': 1000,\r\n 'zscore': False}\r\n\r\ndprime_recache = False\r\nrec_recache = False\r\n\r\nanalysis_name = 'NTI_singel_cell_dprime'\r\nanalysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])\r\ncode_to_name = {'t': 'Probe', 'ct': 'Context'}\r\nfull_screen = [19.2, 9.83]\r\n\r\nall_probes = [2, 3, 5, 6]\r\n\r\nsites = ['ley070a', # good site. 
A1\r\n 'ley072b', # Primary looking responses with strong contextual effects\r\n 'AMT028b', # good site\r\n 'AMT029a', # Strong response, somehow visible contextual effects\r\n 'AMT030a', # low responses, Ok but not as good\r\n # 'AMT031a', # low response, bad\r\n 'AMT032a'] # great site. PEG\r\n\r\nsites = list(get_site_ids(316).keys())\r\n# problem sites:\r\n# sites = ['AMT031a']\r\n\r\n\r\n# for site, probe in zip(['AMT029a', 'ley070a'],[5,2]):\r\n# all_sites = ['AMT029a']\r\n# all_sites = ['AMT032a']\r\n# all_probes = [5]\r\n\r\nbad_sites = list()\r\nall_pvalues = dict()\r\nall_reals = dict()\r\nall_shuffled = dict()\r\n\r\nfor site in sites:\r\n\r\n this_site_reals = list()\r\n this_site_shuffled = list()\r\n this_site_pvalues = list()\r\n for pp, probe in enumerate(all_probes):\r\n # single cell analysis\r\n object_name = f'200221_{site}_P{probe}_single_cell_dprime'\r\n analysis_parameters = '_'.join(['{}-{}'.format(key, str(val)) for key, val in meta.items()])\r\n analysis_name = 'CPN_singel_cell_dprime'\r\n cache_folder = pl.Path('C:\\\\', 'users', 'mateo', 'mycache', analysis_name, analysis_parameters)\r\n\r\n SC_cache = make_cache(function=cell_dprime,\r\n func_args={'site': site, 'probe': probe, 'meta': meta},\r\n classobj_name=object_name,\r\n cache_folder=cache_folder,\r\n recache=dprime_recache)\r\n\r\n dprime, shuf_dprime, cell_names, trans_pairs = get_cache(SC_cache)\r\n\r\n this_site_reals.append(dprime)\r\n this_site_shuffled.append(shuf_dprime)\r\n\r\n # single tailed p value base on the montecarlo shuffling\r\n SC_pvalues = np.sum((shuf_dprime >= dprime), axis=0) / meta['montecarlo']\r\n this_site_pvalues.append(SC_pvalues)\r\n\r\n this_site_reals = np.stack(this_site_reals, axis=0)\r\n this_site_shuffled = np.stack(this_site_shuffled, axis=0)\r\n this_site_pvalues = np.stack(this_site_pvalues, axis=0)\r\n\r\n # reorders date in dictionary of cells\r\n for cc, cell in enumerate(cell_names):\r\n all_reals[cell] = this_site_reals[:, :, cc, :]\r\n all_shuffled[cell] = this_site_shuffled[:, :, :, cc, :].swapaxes(0, 1)\r\n all_pvalues[cell] = this_site_pvalues[:, :, cc, :]\r\n\r\n# stacks the site individual arrays along a new site dimension. 
since the sites have dissimilar cell numbers, pads\r\nall_cells = np.array(list(all_pvalues.keys()))\r\n\r\nthreshold = 0.05\r\nall_signif = {key: (val <= threshold) for key, val in all_pvalues.items()}\r\n\r\n# stacks arrays, with different time dimensions, padding with NaN\r\nshape = np.insert(np.max(np.stack([arr.shape for arr in all_signif.values()], axis=0), axis=0), 0,\r\n len(all_signif))\r\nsignif_array = np.empty(shape)\r\nsignif_array[:] = np.nan\r\nfor cc, arr in enumerate(all_signif.values()):\r\n t = arr.shape[-1]\r\n signif_array[cc, :, :, :t] = arr\r\n# sig_array = np.stack(list(all_signif.values()), axis=0) # dimensions: Cell x Probe x trans_pair x time\r\n\r\n# calculates exponential decay for each cell, collapsing across all probes and transitions\r\nnbin = signif_array.shape[-1]\r\nfs = meta['raster_fs']\r\ntimes = np.linspace(0, nbin / fs, nbin, endpoint=False) * 1000 # units in ms!!!!\r\ncollapsed = signif_array.mean(axis=(1, 2))\r\n\r\n# organizes in a dataframe with columns r0: y intercept, decay: exponential value and tau: time to a 36% amplitude\r\ndf = list()\r\nfor cellid, data in zip(all_cells, collapsed):\r\n popt, _, _ = fit.exp_decay(times, data)\r\n df.append({'cellid': cellid,\r\n 'r0_au': popt[0],\r\n 'decay_ms': popt[1]})\r\n\r\ncontext_fits = pd.DataFrame(df)\r\ncontext_fits['tau_ms'] = -1/context_fits['decay_ms']\r\ncontext_fits.set_index(['cellid'], inplace=True)\r\n\r\n\r\n# 3. import and parse matlab results for Sam's NTI analysis. These results are in a cell by cell format, so it makes\r\n# sense to calculate the dprimes individually for each cell\r\nfile = pl.Path('C:\\\\', 'Users', 'Mateo', 'Documents', 'Science', 'code', 'integration_quilt', 'scrambling-ferrets',\r\n 'analysis', 'model_fit_pop_summary').with_suffix('.mat')\r\n\r\nbest_fits = loadmat(file)['best_fits'].squeeze()\r\n# orders the data in DF\r\ndf = list()\r\nfor row in best_fits:\r\n df.append({'cellid': row[2][0],\r\n 'intper_ms': row[0][0][0],\r\n 'delay_ms': row[1][0][0]})\r\n\r\nintegration_fits = pd.DataFrame(df)\r\nintegration_fits.set_index(['cellid'], inplace=True)\r\n\r\n\r\n# 4. pools together both approaches, selects only common cells, plots relationships\r\n# join='inner' keeps only the intersection between the two DFs, i.e. the cells that have both approaches\r\ndef savefig(fig, root, name):\r\n root = pl.Path(f'C:\\\\users\\\\mateo\\\\Pictures\\\\{root}')\r\n if not root.exists(): root.mkdir(parents=True, exist_ok=True)\r\n png = root.joinpath(name).with_suffix('.png')\r\n fig.savefig(png, transparent=False, dpi=100)\r\n # svg = root.joinpath(name).with_suffix('.svg')\r\n # fig.savefig(svg, transparent=True)\r\n\r\nDF = pd.concat([context_fits, integration_fits], axis=1, join='inner')\r\n# filter out anomalous outliers, i.e. taus over 1 second due to poor fitting\r\nff_good = DF['tau_ms'] < 1000\r\n\r\nfiltered = DF.loc[ff_good, :]\r\n\r\n\r\nfig_root = 'sam_vs_mat'\r\nx = filtered['tau_ms']\r\ny = filtered['intper_ms']\r\n\r\nfig, ax = plt.subplots(figsize=full_screen)\r\nax.scatter(x, y)\r\n_ = fplt.lin_reg(x,y, ax=ax)\r\nax.set_xlabel(x.name)\r\nax.set_ylabel(y.name)\r\nax.legend()\r\nfig.tight_layout(rect=(0, 0, 1, 0.95))\r\ntitle = 'Sam integration vs Mateo Tau'\r\nfig.suptitle(title)\r\nsavefig(fig, fig_root, title)" ]
[ [ "scipy.io.loadmat", "numpy.sum", "numpy.empty", "pandas.DataFrame", "matplotlib.pyplot.subplots", "pandas.concat", "numpy.stack", "numpy.std", "numpy.linspace", "numpy.mean" ] ]
kastalpes/yt
[ "b1e197ca84433fbd61eaf44b28ff5cdb37981d4c" ]
[ "doc/source/cookbook/time_series.py" ]
[ "import yt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Enable parallelism in the script (assuming it was called with\n# `mpirun -np <n_procs>` )\nyt.enable_parallelism()\n\n# By using wildcards such as ? and * with the load command, we can load up a\n# Time Series containing all of these datasets simultaneously.\nts = yt.load('GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0*')\n\nstorage = {}\n\n# By using the piter() function, we can iterate on every dataset in\n# the TimeSeries object. By using the storage keyword, we can populate\n# a dictionary where the dataset is the key, and sto.result is the value\n# for later use when the loop is complete.\n\n# The serial equivalent of piter() here is just \"for ds in ts:\" .\n\nfor store, ds in ts.piter(storage=storage):\n\n # Create a sphere of radius 100 kpc at the center of the dataset volume\n sphere = ds.sphere(\"c\", (100., \"kpc\"))\n # Calculate the entropy within that sphere\n entr = sphere[\"entropy\"].sum()\n # Store the current time and sphere entropy for this dataset in our\n # storage dictionary as a tuple\n store.result = (ds.current_time.in_units('Gyr'), entr)\n\n# Convert the storage dictionary values to a Nx2 array, so the can be easily\n# plotted\narr = np.array(list(storage.values()))\n\n# Plot up the results: time versus entropy\nplt.semilogy(arr[:,0], arr[:,1], 'r-')\nplt.xlabel(\"Time (Gyr)\")\nplt.ylabel(\"Entropy (ergs/K)\")\nplt.savefig(\"time_versus_entropy.png\")\n" ]
[ [ "matplotlib.pyplot.ylabel", "matplotlib.pyplot.semilogy", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ] ]
steffenerickson/pytorch
[ "0b656c4c69ce77ecd9aace486e471917e4660746" ]
[ "test/fx2trt/converters/acc_op/test_batchnorm.py" ]
[ "# Owner(s): [\"oncall: aiacc\"]\n\nimport torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nfrom torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec\nfrom torch.testing._internal.common_utils import run_tests\n\n\nclass TestBatchNormConverter(AccTestCase):\n def test_batchnorm(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(x)\n\n inputs = [torch.randn(1, 3, 224, 224)]\n self.run_test(TestModule(), inputs, expected_ops={acc_ops.batch_norm})\n\n def test_batchnorm_with_dynamic_shape(self):\n class TestModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n return self.bn(x)\n\n input_specs = [\n InputTensorSpec(\n shape=(-1, 3, -1, -1),\n dtype=torch.float32,\n shape_ranges=[((1, 3, 1, 1), (1, 3, 5, 5), (2, 3, 10, 10))],\n ),\n ]\n\n self.run_test_with_dynamic_shape(\n TestModule(), input_specs, expected_ops={acc_ops.batch_norm}\n )\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.randn", "torch.testing._internal.common_utils.run_tests", "torch.testing._internal.common_fx2trt.InputTensorSpec", "torch.nn.BatchNorm2d" ] ]
hieultp/stylegan2-ada-pytorch
[ "d09653fc1c4a8eefe64f29b3e33a2afb3bdd3d22" ]
[ "generate.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Generate images using pretrained network pickle.\"\"\"\n\nimport os\nimport subprocess\nimport re\nfrom typing import List, Optional\n\nimport click\nimport dnnlib\nimport numpy as np\nfrom numpy import linalg\nimport PIL.Image\nimport torch\n\nimport legacy\n\nfrom opensimplex import OpenSimplex\n\n# ---------------------------------------------------------------------------\n\nclass OSN():\n min = -1\n max = 1\n\n def __init__(self, seed, diameter):\n self.tmp = OpenSimplex(seed)\n self.d = diameter\n self.x = 0\n self.y = 0\n\n def get_val(self, angle):\n self.xoff = valmap(np.cos(angle), -1, 1, self.x, self.x + self.d)\n self.yoff = valmap(np.sin(angle), -1, 1, self.y, self.y + self.d)\n return self.tmp.noise2(self.xoff,self.yoff)\n\ndef circularloop(nf, d, seed, seeds):\n r = d/2\n\n zs = []\n # hardcoding in 512, prob TODO fix needed\n # latents_c = rnd.randn(1, G.input_shape[1])\n\n if(seeds is None):\n if seed:\n rnd = np.random.RandomState(seed)\n else:\n rnd = np.random\n latents_a = rnd.randn(1, 512)\n latents_b = rnd.randn(1, 512)\n latents_c = rnd.randn(1, 512)\n elif(len(seeds) is not 3):\n assert('Must choose exactly 3 seeds!')\n else:\n latents_a = np.random.RandomState(int(seeds[0])).randn(1, 512)\n latents_b = np.random.RandomState(int(seeds[1])).randn(1, 512)\n latents_c = np.random.RandomState(int(seeds[2])).randn(1, 512)\n\n latents = (latents_a, latents_b, latents_c)\n\n current_pos = 0.0\n step = 1./nf\n\n while(current_pos < 1.0):\n zs.append(circular_interpolation(r, latents, current_pos))\n current_pos += step\n return zs\n\ndef circular_interpolation(radius, latents_persistent, latents_interpolate):\n latents_a, latents_b, latents_c = latents_persistent\n\n latents_axis_x = (latents_a - latents_b).flatten() / linalg.norm(latents_a - latents_b)\n latents_axis_y = (latents_a - latents_c).flatten() / linalg.norm(latents_a - latents_c)\n\n latents_x = np.sin(np.pi * 2.0 * latents_interpolate) * radius\n latents_y = np.cos(np.pi * 2.0 * latents_interpolate) * radius\n\n latents = latents_a + latents_x * latents_axis_x + latents_y * latents_axis_y\n return latents\n\ndef num_range(s: str) -> List[int]:\n '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''\n\n range_re = re.compile(r'^(\\d+)-(\\d+)$')\n m = range_re.match(s)\n if m:\n return list(range(int(m.group(1)), int(m.group(2))+1))\n vals = s.split(',')\n return [int(x) for x in vals]\n\ndef size_range(s: str) -> List[int]:\n '''Accept a range 'a-c' and return as a list of 2 ints.'''\n return [int(v) for v in s.split('-')][::-1]\n\ndef line_interpolate(zs, steps, easing):\n out = []\n for i in range(len(zs)-1):\n for index in range(steps):\n t = index/float(steps)\n\n if(easing == 'linear'):\n out.append(zs[i+1]*t + zs[i]*(1-t))\n elif (easing == 'easeInOutQuad'):\n if(t < 0.5):\n fr = 2 * t * t\n else:\n fr = (-2 * t * t) + (4 * t) - 1\n out.append(zs[i+1]*fr + zs[i]*(1-fr))\n elif (easing == 'bounceEaseOut'):\n if (t < 4/11):\n fr = 121 * t * t / 16\n elif (t < 8/11):\n fr = (363 / 40.0 * t * t) - (99 / 10.0 * t) + 17 / 
5.0\n elif t < 9/10:\n fr = (4356 / 361.0 * t * t) - (35442 / 1805.0 * t) + 16061 / 1805.0\n else:\n fr = (54 / 5.0 * t * t) - (513 / 25.0 * t) + 268 / 25.0\n out.append(zs[i+1]*fr + zs[i]*(1-fr))\n elif (easing == 'circularEaseOut'):\n fr = np.sqrt((2 - t) * t)\n out.append(zs[i+1]*fr + zs[i]*(1-fr))\n elif (easing == 'circularEaseOut2'):\n fr = np.sqrt(np.sqrt((2 - t) * t))\n out.append(zs[i+1]*fr + zs[i]*(1-fr))\n elif(easing == 'backEaseOut'):\n p = 1 - t\n fr = 1 - (p * p * p - p * math.sin(p * math.pi))\n out.append(zs[i+1]*fr + zs[i]*(1-fr))\n return out\n\ndef noiseloop(nf, d, seed):\n if seed:\n np.random.RandomState(seed)\n\n features = []\n zs = []\n for i in range(512):\n features.append(OSN(i+seed,d))\n\n inc = (np.pi*2)/nf\n for f in range(nf):\n z = np.random.randn(1, 512)\n for i in range(512):\n z[0,i] = features[i].get_val(inc*f)\n zs.append(z)\n\n return zs\n\ndef images(G,device,inputs,space,truncation_psi,label,noise_mode,outdir,start=None,stop=None):\n if(start is not None and stop is not None):\n tp = start\n tp_i = (stop-start)/len(inputs)\n\n for idx, i in enumerate(inputs):\n print('Generating image for frame %d/%d ...' % (idx, len(inputs)))\n \n if (space=='z'):\n z = torch.from_numpy(i).to(device)\n if(start is not None and stop is not None):\n img = G(z, label, truncation_psi=tp, noise_mode=noise_mode)\n tp = tp+tp_i\n else:\n img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)\n else:\n if len(i.shape) == 2: \n i = torch.from_numpy(i).unsqueeze(0).to(device)\n img = G.synthesis(i, noise_mode=noise_mode, force_fp32=True)\n img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/frame{idx:04d}.png')\n\ndef interpolate(G,device,projected_w,seeds,random_seed,space,truncation_psi,label,frames,noise_mode,outdir,interpolation,easing,diameter,start=None,stop=None):\n if(interpolation=='noiseloop' or interpolation=='circularloop'):\n if seeds is not None:\n print(f'Warning: interpolation type: \"{interpolation}\" doesn’t support set seeds.')\n\n if(interpolation=='noiseloop'):\n points = noiseloop(frames, diameter, random_seed)\n elif(interpolation=='circularloop'):\n points = circularloop(frames, diameter, random_seed, seeds)\n\n else:\n if projected_w is not None:\n points = np.load(projected_w)['w']\n else:\n # get zs from seeds\n points = seeds_to_zs(G,seeds) \n # convert to ws\n if(space=='w'):\n points = zs_to_ws(G,device,label,truncation_psi,points)\n\n # get interpolation points\n if(interpolation=='linear'):\n points = line_interpolate(points,frames,easing)\n elif(interpolation=='slerp'):\n points = slerp_interpolate(points,frames)\n \n # generate frames\n images(G,device,points,space,truncation_psi,label,noise_mode,outdir,start,stop)\n\ndef seeds_to_zs(G,seeds):\n zs = []\n for seed_idx, seed in enumerate(seeds):\n z = np.random.RandomState(seed).randn(1, G.z_dim)\n zs.append(z)\n return zs\n\n# slightly modified version of\n# https://github.com/PDillis/stylegan2-fun/blob/master/run_generator.py#L399\ndef slerp(t, v0, v1, DOT_THRESHOLD=0.9995):\n '''\n Spherical linear interpolation\n Args:\n t (float/np.ndarray): Float value between 0.0 and 1.0\n v0 (np.ndarray): Starting vector\n v1 (np.ndarray): Final vector\n DOT_THRESHOLD (float): Threshold for considering the two vectors as\n colineal. 
Not recommended to alter this.\n Returns:\n v2 (np.ndarray): Interpolation vector between v0 and v1\n '''\n v0 = v0.cpu().detach().numpy()\n v1 = v1.cpu().detach().numpy()\n # Copy the vectors to reuse them later\n v0_copy = np.copy(v0)\n v1_copy = np.copy(v1)\n # Normalize the vectors to get the directions and angles\n v0 = v0 / np.linalg.norm(v0)\n v1 = v1 / np.linalg.norm(v1)\n # Dot product with the normalized vectors (can't use np.dot in W)\n dot = np.sum(v0 * v1)\n # If absolute value of dot product is almost 1, vectors are ~colineal, so use lerp\n if np.abs(dot) > DOT_THRESHOLD:\n return lerp(t, v0_copy, v1_copy)\n # Calculate initial angle between v0 and v1\n theta_0 = np.arccos(dot)\n sin_theta_0 = np.sin(theta_0)\n # Angle at timestep t\n theta_t = theta_0 * t\n sin_theta_t = np.sin(theta_t)\n # Finish the slerp algorithm\n s0 = np.sin(theta_0 - theta_t) / sin_theta_0\n s1 = sin_theta_t / sin_theta_0\n v2 = s0 * v0_copy + s1 * v1_copy\n return torch.from_numpy(v2).to(\"cuda\")\n\ndef slerp_interpolate(zs, steps):\n out = []\n for i in range(len(zs)-1):\n for index in range(steps):\n fraction = index/float(steps)\n out.append(slerp(fraction,zs[i],zs[i+1]))\n return out\n\ndef truncation_traversal(G,device,z,label,start,stop,increment,noise_mode,outdir):\n count = 1\n trunc = start\n\n z = seeds_to_zs(G,z)[0]\n z = torch.from_numpy(np.asarray(z)).to(device)\n\n while trunc <= stop:\n print('Generating truncation %0.2f' % trunc)\n \n img = G(z, label, truncation_psi=trunc, noise_mode=noise_mode)\n img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/frame{count:04d}.png')\n\n trunc+=increment\n count+=1\n\ndef valmap(value, istart, istop, ostart, ostop):\n return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))\n\ndef zs_to_ws(G,device,label,truncation_psi,zs):\n ws = []\n for z_idx, z in enumerate(zs):\n z = torch.from_numpy(z).to(device)\n w = G.mapping(z, label, truncation_psi=truncation_psi, truncation_cutoff=8)\n ws.append(w)\n return ws\n\n#----------------------------------------------------------------------------\n\[email protected]()\[email protected]_context\[email protected]('--network', 'network_pkl', help='Network pickle filename', required=True)\[email protected]('--seeds', type=num_range, help='List of random seeds')\[email protected]('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)\[email protected]('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')\[email protected]('--diameter', type=float, help='diameter of loops', default=100.0, show_default=True)\[email protected]('--frames', type=int, help='how many frames to produce (with seeds this is frames between each step, with loops this is total length)', default=240, show_default=True)\[email protected]('--fps', type=int, help='framerate for video', default=24, show_default=True)\[email protected]('--increment', type=float, help='truncation increment value', default=0.01, show_default=True)\[email protected]('--interpolation', type=click.Choice(['linear', 'slerp', 'noiseloop', 'circularloop']), default='linear', help='interpolation type', required=True)\[email protected]('--easing',\n type=click.Choice(['linear', 'easeInOutQuad', 'bounceEaseOut','circularEaseOut','circularEaseOut2']),\n default='linear', help='easing method', required=True)\[email protected]('--network', 'network_pkl', help='Network pickle filename', 
required=True)\[email protected]('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)\[email protected]('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')\[email protected]('--process', type=click.Choice(['image', 'interpolation','truncation','interpolation-truncation']), default='image', help='generation method', required=True)\[email protected]('--projected-w', help='Projection result file', type=str, metavar='FILE')\[email protected]('--random_seed', type=int, help='random seed value (used in noise and circular loop)', default=0, show_default=True)\[email protected]('--scale-type',\n type=click.Choice(['pad', 'padside', 'symm','symmside']),\n default='pad', help='scaling method for --size', required=False)\[email protected]('--size', type=size_range, help='size of output (in format x-y)')\[email protected]('--seeds', type=num_range, help='List of random seeds')\[email protected]('--space', type=click.Choice(['z', 'w']), default='z', help='latent space', required=True)\[email protected]('--start', type=float, help='starting truncation value', default=0.0, show_default=True)\[email protected]('--stop', type=float, help='stopping truncation value', default=1.0, show_default=True)\[email protected]('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)\n\ndef generate_images(\n ctx: click.Context,\n easing: str,\n interpolation: str,\n increment: Optional[float],\n network_pkl: str,\n process: str,\n random_seed: Optional[int],\n diameter: Optional[float],\n scale_type: Optional[str],\n size: Optional[List[int]],\n seeds: Optional[List[int]],\n space: str,\n fps: Optional[int],\n frames: Optional[int],\n truncation_psi: float,\n noise_mode: str,\n outdir: str,\n class_idx: Optional[int],\n projected_w: Optional[str],\n start: Optional[float],\n stop: Optional[float],\n):\n \"\"\"Generate images using pretrained network pickle.\n\n Examples:\n\n \\b\n # Generate curated MetFaces images without truncation (Fig.10 left)\n python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl\n\n \\b\n # Generate uncurated MetFaces images with truncation (Fig.12 upper left)\n python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl\n\n \\b\n # Generate class conditional CIFAR-10 images (Fig.17 left, Car)\n python generate.py --outdir=out --seeds=0-35 --class=1 \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl\n\n \\b\n # Render an image from projected W\n python generate.py --outdir=out --projected_w=projected_w.npz \\\\\n --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl\n \"\"\"\n \n # custom size code from https://github.com/eps696/stylegan2ada/blob/master/src/_genSGAN2.py\n if(size): \n print('render custom size: ',size)\n print('padding method:', scale_type )\n custom = True\n else:\n custom = False\n\n G_kwargs = dnnlib.EasyDict()\n G_kwargs.size = size \n G_kwargs.scale_type = scale_type\n\n # mask/blend latents with external latmask or by splitting the frame\n latmask = False #temp\n if latmask is None:\n nHW = [int(s) for s in a.nXY.split('-')][::-1]\n assert len(nHW)==2, ' Wrong count nXY: %d (must be 2)' % len(nHW)\n n_mult = nHW[0] * nHW[1]\n # if a.verbose is True and 
n_mult > 1: print(' Latent blending w/split frame %d x %d' % (nHW[1], nHW[0]))\n lmask = np.tile(np.asarray([[[[1]]]]), (1,n_mult,1,1))\n Gs_kwargs.countHW = nHW\n Gs_kwargs.splitfine = a.splitfine\n lmask = torch.from_numpy(lmask).to(device)\n # else:\n # if a.verbose is True: print(' Latent blending with mask', a.latmask)\n # n_mult = 2\n # if os.path.isfile(a.latmask): # single file\n # lmask = np.asarray([[img_read(a.latmask)[:,:,0] / 255.]]) # [h,w]\n # elif os.path.isdir(a.latmask): # directory with frame sequence\n # lmask = np.asarray([[img_read(f)[:,:,0] / 255. for f in img_list(a.latmask)]]) # [h,w]\n # else:\n # print(' !! Blending mask not found:', a.latmask); exit(1)\n # lmask = np.concatenate((lmask, 1 - lmask), 1) # [frm,2,h,w]\n # lmask = torch.from_numpy(lmask).to(device)\n\n print('Loading networks from \"%s\"...' % network_pkl)\n device = torch.device('cuda')\n with dnnlib.util.open_url(network_pkl) as f:\n # G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore\n G = legacy.load_network_pkl(f, custom=custom, **G_kwargs)['G_ema'].to(device) # type: ignore\n\n os.makedirs(outdir, exist_ok=True)\n\n # Synthesize the result of a W projection.\n if (process=='image') and projected_w is not None:\n if seeds is not None:\n print ('Warning: --seeds is ignored when using --projected-w')\n print(f'Generating images from projected W \"{projected_w}\"')\n ws = np.load(projected_w)['w']\n ws = torch.tensor(ws, device=device) # pylint: disable=not-callable\n assert ws.shape[1:] == (G.num_ws, G.w_dim)\n for idx, w in enumerate(ws):\n img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)\n img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/proj{idx:02d}.png')\n return\n\n # Labels.\n label = torch.zeros([1, G.c_dim], device=device)\n if G.c_dim != 0:\n if class_idx is None:\n ctx.fail('Must specify class label with --class when using a conditional network')\n label[:, class_idx] = 1\n else:\n if class_idx is not None:\n print ('warn: --class=lbl ignored when running on an unconditional network')\n\n\n if(process=='image'):\n if seeds is None:\n ctx.fail('--seeds option is required when not using --projected-w')\n\n # Generate images.\n for seed_idx, seed in enumerate(seeds):\n print('Generating image for seed %d (%d/%d) ...' 
% (seed, seed_idx, len(seeds)))\n z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)\n img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)\n img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)\n PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')\n\n elif(process=='interpolation' or process=='interpolation-truncation'):\n # create path for frames\n dirpath = os.path.join(outdir,'frames')\n os.makedirs(dirpath, exist_ok=True)\n\n # autogenerate video name: not great!\n if seeds is not None:\n seedstr = '_'.join([str(seed) for seed in seeds])\n vidname = f'{process}-{interpolation}-seeds_{seedstr}-{fps}fps'\n elif(interpolation=='noiseloop' or 'circularloop'):\n vidname = f'{process}-{interpolation}-{diameter}dia-seed_{random_seed}-{fps}fps'\n\n if process=='interpolation-truncation':\n interpolate(G,device,projected_w,seeds,random_seed,space,truncation_psi,label,frames,noise_mode,dirpath,interpolation,easing,diameter,start,stop)\n else:\n interpolate(G,device,projected_w,seeds,random_seed,space,truncation_psi,label,frames,noise_mode,dirpath,interpolation,easing,diameter)\n\n # convert to video\n cmd=f'ffmpeg -y -r {fps} -i {dirpath}/frame%04d.png -vcodec libx264 -pix_fmt yuv420p {outdir}/{vidname}.mp4'\n subprocess.call(cmd, shell=True)\n\n elif(process=='truncation'):\n if seeds is None or (len(seeds)>1):\n ctx.fail('truncation requires a single seed value')\n\n # create path for frames\n dirpath = os.path.join(outdir,'frames')\n os.makedirs(dirpath, exist_ok=True)\n\n #vidname\n seed = seeds[0]\n vidname = f'{process}-seed_{seed}-start_{start}-stop_{stop}-inc_{increment}-{fps}fps'\n\n # generate frames\n truncation_traversal(G,device,seeds,label,start,stop,increment,noise_mode,dirpath)\n\n # convert to video\n cmd=f'ffmpeg -y -r {fps} -i {dirpath}/frame%04d.png -vcodec libx264 -pix_fmt yuv420p {outdir}/{vidname}.mp4'\n subprocess.call(cmd, shell=True)\n\n#----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n generate_images() # pylint: disable=no-value-for-parameter\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.load", "numpy.linalg.norm", "numpy.arccos", "numpy.cos", "numpy.copy", "numpy.random.randn", "numpy.random.RandomState", "numpy.abs", "torch.tensor", "torch.from_numpy", "numpy.asarray", "torch.zeros", "numpy.sin", "torch.device" ] ]
mgxd/nitransforms
[ "a922f3cb8ee1df5b484f617c34e1816a726e54e0" ]
[ "nitransforms/tests/test_affines.py" ]
[ "import numpy as np\nfrom nibabel.affines import from_matvec\nfrom nibabel.eulerangles import euler2mat\nfrom ..patched import obliquity\n\ndef test_obliquity():\n \"\"\"Check the calculation of inclination of an affine axes.\"\"\"\n from math import pi\n aligned = np.diag([2.0, 2.0, 2.3, 1.0])\n aligned[:-1, -1] = [-10, -10, -7]\n R = from_matvec(euler2mat(x=0.09, y=0.001, z=0.001), [0.0, 0.0, 0.0])\n oblique = R.dot(aligned)\n np.testing.assert_almost_equal(obliquity(aligned), [0.0, 0.0, 0.0])\n np.testing.assert_almost_equal(obliquity(oblique) * 180 / pi,\n [0.0810285, 5.1569949, 5.1569376])\n" ]
[ [ "numpy.diag" ] ]
lizhihao6/HINet
[ "8c3fb71dc3331be32a9af3e1efe5106a0b26cefe" ]
[ "basicsr/data/reds_dataset.py" ]
[ "# ------------------------------------------------------------------------\n# Copyright (c) 2021 megvii-model. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from BasicSR (https://github.com/xinntao/BasicSR)\n# Copyright 2018-2020 BasicSR Authors\n# ------------------------------------------------------------------------\nimport random\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils import data as data\n\nfrom basicsr.data.transforms import augment, paired_random_crop\nfrom basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor\nfrom basicsr.utils.flow_util import dequantize_flow\n\n\nclass REDSDataset(data.Dataset):\n \"\"\"REDS dataset for training.\n\n The keys are generated from a meta info txt file.\n basicsr/data/meta_info/meta_info_REDS_GT.txt\n\n Each line contains:\n 1. subfolder (clip) name; 2. frame number; 3. image shape, seperated by\n a white space.\n Examples:\n 000 100 (720,1280,3)\n 001 100 (720,1280,3)\n ...\n\n Key examples: \"000/00000000\"\n GT (gt): Ground-Truth;\n LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.\n\n Args:\n opt (dict): Config for train dataset. It contains the following keys:\n dataroot_gt (str): Data root path for gt.\n dataroot_lq (str): Data root path for lq.\n dataroot_flow (str, optional): Data root path for flow.\n meta_info_file (str): Path for meta information file.\n val_partition (str): Validation partition types. 'REDS4' or\n 'official'.\n io_backend (dict): IO backend type and other kwarg.\n\n num_frame (int): Window size for input frames.\n gt_size (int): Cropped patched size for gt patches.\n interval_list (list): Interval list for temporal augmentation.\n random_reverse (bool): Random reverse input frames.\n use_flip (bool): Use horizontal flips.\n use_rot (bool): Use rotation (use vertical flip and transposing h\n and w for implementation).\n\n scale (bool): Scale, which will be added automatically.\n \"\"\"\n\n def __init__(self, opt):\n super(REDSDataset, self).__init__()\n self.opt = opt\n self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(\n opt['dataroot_lq'])\n self.flow_root = Path(\n opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None\n assert opt['num_frame'] % 2 == 1, (\n f'num_frame should be odd number, but got {opt[\"num_frame\"]}')\n self.num_frame = opt['num_frame']\n self.num_half_frames = opt['num_frame'] // 2\n\n self.keys = []\n with open(opt['meta_info_file'], 'r') as fin:\n for line in fin:\n folder, frame_num, _ = line.split(' ')\n self.keys.extend(\n [f'{folder}/{i:08d}' for i in range(int(frame_num))])\n\n # remove the video clips used in validation\n if opt['val_partition'] == 'REDS4':\n val_partition = ['000', '011', '015', '020']\n elif opt['val_partition'] == 'official':\n val_partition = [f'{v:03d}' for v in range(240, 270)]\n else:\n raise ValueError(\n f'Wrong validation partition {opt[\"val_partition\"]}.'\n f\"Supported ones are ['official', 'REDS4'].\")\n self.keys = [\n v for v in self.keys if v.split('/')[0] not in val_partition\n ]\n\n # file client (io backend)\n self.file_client = None\n self.io_backend_opt = opt['io_backend']\n self.is_lmdb = False\n if self.io_backend_opt['type'] == 'lmdb':\n self.is_lmdb = True\n if self.flow_root is not None:\n self.io_backend_opt['db_paths'] = [\n self.lq_root, self.gt_root, self.flow_root\n ]\n self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']\n else:\n self.io_backend_opt['db_paths'] 
= [self.lq_root, self.gt_root]\n self.io_backend_opt['client_keys'] = ['lq', 'gt']\n\n # temporal augmentation configs\n self.interval_list = opt['interval_list']\n self.random_reverse = opt['random_reverse']\n interval_str = ','.join(str(x) for x in opt['interval_list'])\n logger = get_root_logger()\n logger.info(f'Temporal augmentation interval list: [{interval_str}]; '\n f'random reverse is {self.random_reverse}.')\n\n def __getitem__(self, index):\n if self.file_client is None:\n self.file_client = FileClient(\n self.io_backend_opt.pop('type'), **self.io_backend_opt)\n\n scale = self.opt['scale']\n gt_size = self.opt['gt_size']\n key = self.keys[index]\n clip_name, frame_name = key.split('/') # key example: 000/00000000\n center_frame_idx = int(frame_name)\n\n # determine the neighboring frames\n interval = random.choice(self.interval_list)\n\n # ensure not exceeding the borders\n start_frame_idx = center_frame_idx - self.num_half_frames * interval\n end_frame_idx = center_frame_idx + self.num_half_frames * interval\n # each clip has 100 frames starting from 0 to 99\n while (start_frame_idx < 0) or (end_frame_idx > 99):\n center_frame_idx = random.randint(0, 99)\n start_frame_idx = (\n center_frame_idx - self.num_half_frames * interval)\n end_frame_idx = center_frame_idx + self.num_half_frames * interval\n frame_name = f'{center_frame_idx:08d}'\n neighbor_list = list(\n range(center_frame_idx - self.num_half_frames * interval,\n center_frame_idx + self.num_half_frames * interval + 1,\n interval))\n # random reverse\n if self.random_reverse and random.random() < 0.5:\n neighbor_list.reverse()\n\n assert len(neighbor_list) == self.num_frame, (\n f'Wrong length of neighbor list: {len(neighbor_list)}')\n\n # get the GT frame (as the center frame)\n if self.is_lmdb:\n img_gt_path = f'{clip_name}/{frame_name}'\n else:\n img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'\n img_bytes = self.file_client.get(img_gt_path, 'gt')\n img_gt = imfrombytes(img_bytes, float32=True)\n\n # get the neighboring LQ frames\n img_lqs = []\n for neighbor in neighbor_list:\n if self.is_lmdb:\n img_lq_path = f'{clip_name}/{neighbor:08d}'\n else:\n img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'\n img_bytes = self.file_client.get(img_lq_path, 'lq')\n img_lq = imfrombytes(img_bytes, float32=True)\n img_lqs.append(img_lq)\n\n # get flows\n if self.flow_root is not None:\n img_flows = []\n # read previous flows\n for i in range(self.num_half_frames, 0, -1):\n if self.is_lmdb:\n flow_path = f'{clip_name}/{frame_name}_p{i}'\n else:\n flow_path = (\n self.flow_root / clip_name / f'{frame_name}_p{i}.png')\n img_bytes = self.file_client.get(flow_path, 'flow')\n cat_flow = imfrombytes(\n img_bytes, flag='grayscale',\n float32=False) # uint8, [0, 255]\n dx, dy = np.split(cat_flow, 2, axis=0)\n flow = dequantize_flow(\n dx, dy, max_val=20,\n denorm=False) # we use max_val 20 here.\n img_flows.append(flow)\n # read next flows\n for i in range(1, self.num_half_frames + 1):\n if self.is_lmdb:\n flow_path = f'{clip_name}/{frame_name}_n{i}'\n else:\n flow_path = (\n self.flow_root / clip_name / f'{frame_name}_n{i}.png')\n img_bytes = self.file_client.get(flow_path, 'flow')\n cat_flow = imfrombytes(\n img_bytes, flag='grayscale',\n float32=False) # uint8, [0, 255]\n dx, dy = np.split(cat_flow, 2, axis=0)\n flow = dequantize_flow(\n dx, dy, max_val=20,\n denorm=False) # we use max_val 20 here.\n img_flows.append(flow)\n\n # for random crop, here, img_flows and img_lqs have the same\n # spatial size\n 
img_lqs.extend(img_flows)\n\n # randomly crop\n img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,\n img_gt_path)\n if self.flow_root is not None:\n img_lqs, img_flows = img_lqs[:self.num_frame], img_lqs[self.\n num_frame:]\n\n # augmentation - flip, rotate\n img_lqs.append(img_gt)\n if self.flow_root is not None:\n img_results, img_flows = augment(img_lqs, self.opt['use_flip'],\n self.opt['use_rot'], img_flows)\n else:\n img_results = augment(img_lqs, self.opt['use_flip'],\n self.opt['use_rot'])\n\n img_results = img2tensor(img_results)\n img_lqs = torch.stack(img_results[0:-1], dim=0)\n img_gt = img_results[-1]\n\n if self.flow_root is not None:\n img_flows = img2tensor(img_flows)\n # add the zero center flow\n img_flows.insert(self.num_half_frames,\n torch.zeros_like(img_flows[0]))\n img_flows = torch.stack(img_flows, dim=0)\n\n # img_lqs: (t, c, h, w)\n # img_flows: (t, 2, h, w)\n # img_gt: (c, h, w)\n # key: str\n if self.flow_root is not None:\n return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}\n else:\n return {'lq': img_lqs, 'gt': img_gt, 'key': key}\n\n def __len__(self):\n return len(self.keys)\n" ]
[ [ "torch.zeros_like", "torch.stack", "numpy.split" ] ]
ProFatXuanAll/char-RNN
[ "531f101b3d1ba20bafd28ca060aafe6f583d1efb" ]
[ "test/lmp/model/_lstm_1997/test_pred.py" ]
[ "\"\"\"Test prediction.\n\nTest target:\n- :py:meth:`lmp.model._lstm_1997.LSTM1997.pred`.\n\"\"\"\n\nimport torch\n\nfrom lmp.model._lstm_1997 import LSTM1997\n\n\ndef test_prediction_result(lstm_1997: LSTM1997, batch_cur_tkids: torch.Tensor) -> None:\n \"\"\"Return float tensor with correct shape and range.\"\"\"\n lstm_1997 = lstm_1997.eval()\n seq_len = batch_cur_tkids.size(1)\n\n batch_prev_states = None\n for i in range(seq_len):\n batch_next_tkids_pd, batch_prev_states = lstm_1997.pred(\n batch_cur_tkids=batch_cur_tkids[..., i],\n batch_prev_states=batch_prev_states,\n )\n\n # Output float tensor.\n assert batch_next_tkids_pd.dtype == torch.float\n\n # Shape: (batch_size, vocab_size).\n assert batch_next_tkids_pd.size() == torch.Size([batch_cur_tkids.shape[0], lstm_1997.emb.num_embeddings])\n\n # Probabilities are values within range [0, 1].\n assert torch.all(0 <= batch_next_tkids_pd).item()\n assert torch.all(batch_next_tkids_pd <= 1).item()\n\n # Sum of the probabilities equals to 1.\n accum = batch_next_tkids_pd.sum(dim=-1)\n assert torch.allclose(accum, torch.ones_like(accum))\n\n assert isinstance(batch_prev_states, list)\n assert len(batch_prev_states) == 2\n assert batch_prev_states[0].size() == torch.Size([batch_cur_tkids.size(0), lstm_1997.n_blk * lstm_1997.d_blk])\n assert batch_prev_states[1].size() == torch.Size([batch_cur_tkids.size(0), lstm_1997.n_blk, lstm_1997.d_blk])\n" ]
[ [ "torch.ones_like", "torch.all", "torch.Size" ] ]
prteek/sagemaker-boilerplate
[ "372ff5722e5f4b5e2d84008e7186762b5bade2ad" ]
[ "non_linear_model.py" ]
[ "#! /opt/conda/envs/env/bin/python\nimport argparse\nimport os\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport joblib\nfrom sklearn.metrics import accuracy_score\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"--model-dir\", default=\"/opt/ml/model/\")\n parser.add_argument(\"--training\", default=\"/opt/ml/input/data/training\")\n parser.add_argument(\"--alpha\", type=float, default=0.0001)\n \n args = parser.parse_args()\n \n \n df = pd.read_csv(os.path.join(args.training, 'transfusion.data'))\n\n predictors = ['Recency (months)', 'Time (months)', 'Frequency (times)', 'Monetary (c.c. blood)']\n\n target = 'whether he/she donated blood in March 2007'\n\n X = df[predictors]\n y = df[target]\n \n estimator = RandomForestClassifier()\n \n estimator.fit(X,y)\n \n print(f\"accuracy={accuracy_score(y, estimator.predict(X))};\")\n \n joblib.dump(estimator, os.path.join(args.model_dir, 'model.mdl'))\n \n " ]
[ [ "sklearn.ensemble.RandomForestClassifier" ] ]
Anon-Artist/tfx
[ "2692c9ab437d76b5d9517996bfe2596862e0791d" ]
[ "tfx/components/example_validator/component_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.example_validator.component.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tfx.components.example_validator import component\nfrom tfx.types import artifact_utils\nfrom tfx.types import channel_utils\nfrom tfx.types import standard_artifacts\nfrom tfx.types.standard_component_specs import ANOMALIES_KEY\nfrom tfx.types.standard_component_specs import EXCLUDE_SPLITS_KEY\n\n\nclass ExampleValidatorTest(tf.test.TestCase):\n\n def testConstruct(self):\n statistics_artifact = standard_artifacts.ExampleStatistics()\n statistics_artifact.split_names = artifact_utils.encode_split_names(\n ['train', 'eval'])\n exclude_splits = ['eval']\n example_validator = component.ExampleValidator(\n statistics=channel_utils.as_channel([statistics_artifact]),\n schema=channel_utils.as_channel([standard_artifacts.Schema()]),\n exclude_splits=exclude_splits)\n self.assertEqual(\n standard_artifacts.ExampleAnomalies.TYPE_NAME,\n example_validator.outputs[ANOMALIES_KEY].type_name)\n self.assertEqual(\n example_validator.spec.exec_properties[EXCLUDE_SPLITS_KEY], '[\"eval\"]')\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
JouniVatanen/NLP-and-Deep-Learning
[ "2fddcc2c39787713d33d17e80565de4ed073ca60" ]
[ "src/supervised_class/dt.py" ]
[ "# https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python\n# https://www.udemy.com/data-science-supervised-machine-learning-in-python\n# Decision Tree for continuous-vector input, binary output\nfrom __future__ import print_function, division\nfrom future.utils import iteritems\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\nimport numpy as np\nfrom util import get_data, get_xor, get_donut\nfrom datetime import datetime\n\n\ndef entropy(y):\n # assume y is binary - 0 or 1\n N = len(y)\n s1 = (y == 1).sum()\n if 0 == s1 or N == s1:\n return 0\n p1 = float(s1) / N\n p0 = 1 - p1\n return -p0*np.log2(p0) - p1*np.log2(p1)\n\n\nclass TreeNode:\n def __init__(self, depth=0, max_depth=None):\n # print 'depth:', depth\n self.depth = depth\n self.max_depth = max_depth\n\n def fit(self, X, Y):\n if len(Y) == 1 or len(set(Y)) == 1:\n # base case, only 1 sample\n # another base case\n # this node only receives examples from 1 class\n # we can't make a split\n self.col = None\n self.split = None\n self.left = None\n self.right = None\n self.prediction = Y[0]\n\n else:\n D = X.shape[1]\n cols = range(D)\n\n max_ig = 0\n best_col = None\n best_split = None\n for col in cols:\n ig, split = self.find_split(X, Y, col)\n # print \"ig:\", ig\n if ig > max_ig:\n max_ig = ig\n best_col = col\n best_split = split\n\n if max_ig == 0:\n # nothing we can do\n # no further splits\n self.col = None\n self.split = None\n self.left = None\n self.right = None\n self.prediction = np.round(Y.mean())\n else:\n self.col = best_col\n self.split = best_split\n\n if self.depth == self.max_depth:\n self.left = None\n self.right = None\n self.prediction = [\n np.round(Y[X[:,best_col] < self.split].mean()),\n np.round(Y[X[:,best_col] >= self.split].mean()),\n ]\n else:\n # print \"best split:\", best_split\n left_idx = (X[:,best_col] < best_split)\n # print \"left_idx.shape:\", left_idx.shape, \"len(X):\", len(X)\n Xleft = X[left_idx]\n Yleft = Y[left_idx]\n self.left = TreeNode(self.depth + 1, self.max_depth)\n self.left.fit(Xleft, Yleft)\n\n right_idx = (X[:,best_col] >= best_split)\n Xright = X[right_idx]\n Yright = Y[right_idx]\n self.right = TreeNode(self.depth + 1, self.max_depth)\n self.right.fit(Xright, Yright)\n\n def find_split(self, X, Y, col):\n # print \"finding split for col:\", col\n x_values = X[:, col]\n sort_idx = np.argsort(x_values)\n x_values = x_values[sort_idx]\n y_values = Y[sort_idx]\n\n # Note: optimal split is the midpoint between 2 points\n # Note: optimal split is only on the boundaries between 2 classes\n\n # if boundaries[i] is true\n # then y_values[i] != y_values[i+1]\n # nonzero() gives us indices where arg is true\n # but for some reason it returns a tuple of size 1\n boundaries = np.nonzero(y_values[:-1] != y_values[1:])[0]\n best_split = None\n max_ig = 0\n for b in boundaries:\n split = (x_values[b] + x_values[b+1]) / 2\n ig = self.information_gain(x_values, y_values, split)\n if ig > max_ig:\n max_ig = ig\n best_split = split\n return max_ig, best_split\n\n def information_gain(self, x, y, split):\n # assume classes are 0 and 1\n # print \"split:\", split\n y0 = y[x < split]\n y1 = y[x >= split]\n N = len(y)\n y0len = len(y0)\n if y0len == 0 or y0len == N:\n return 0\n p0 = float(len(y0)) / N\n p1 = 1 - p0 #float(len(y1)) / N\n # print \"entropy(y):\", entropy(y)\n # print \"p0:\", p0\n # print \"entropy(y0):\", entropy(y0)\n # print \"p1:\", p1\n # print \"entropy(y1):\", 
entropy(y1)\n return entropy(y) - p0*entropy(y0) - p1*entropy(y1)\n\n def predict_one(self, x):\n # use \"is not None\" because 0 means False\n if self.col is not None and self.split is not None:\n feature = x[self.col]\n if feature < self.split:\n if self.left:\n p = self.left.predict_one(x)\n else:\n p = self.prediction[0]\n else:\n if self.right:\n p = self.right.predict_one(x)\n else:\n p = self.prediction[1]\n else:\n # corresponds to having only 1 prediction\n p = self.prediction\n return p\n\n def predict(self, X):\n N = len(X)\n P = np.zeros(N)\n for i in range(N):\n P[i] = self.predict_one(X[i])\n return P\n\n\n# This class is kind of redundant\nclass DecisionTree:\n def __init__(self, max_depth=None):\n self.max_depth = max_depth\n\n def fit(self, X, Y):\n self.root = TreeNode(max_depth=self.max_depth)\n self.root.fit(X, Y)\n\n def predict(self, X):\n return self.root.predict(X)\n\n def score(self, X, Y):\n P = self.predict(X)\n return np.mean(P == Y)\n\n\nif __name__ == '__main__':\n X, Y = get_data()\n\n # try donut and xor\n # from sklearn.utils import shuffle\n # X, Y = get_xor()\n # # X, Y = get_donut()\n # X, Y = shuffle(X, Y)\n\n # only take 0s and 1s since we're doing binary classification\n idx = np.logical_or(Y == 0, Y == 1)\n X = X[idx]\n Y = Y[idx]\n\n # split the data\n Ntrain = len(Y) // 2\n Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]\n Xtest, Ytest = X[Ntrain:], Y[Ntrain:]\n \n model = DecisionTree()\n # model = DecisionTree(max_depth=7)\n t0 = datetime.now()\n model.fit(Xtrain, Ytrain)\n print(\"Training time:\", (datetime.now() - t0))\n\n t0 = datetime.now()\n print(\"Train accuracy:\", model.score(Xtrain, Ytrain))\n print(\"Time to compute train accuracy:\", (datetime.now() - t0))\n\n t0 = datetime.now()\n print(\"Test accuracy:\", model.score(Xtest, Ytest))\n print(\"Time to compute test accuracy:\", (datetime.now() - t0))\n" ]
[ [ "numpy.logical_or", "numpy.log2", "numpy.zeros", "numpy.argsort", "numpy.nonzero", "numpy.mean" ] ]
liuhuaijjin/rpn_rois_proposals_layers
[ "c5f9f09b3ae8c52e4b6fa3fda391f993cb7d42c1" ]
[ "lib/net/pcn_coarse.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n\n # first shared mlp\n self.conv1 = nn.Conv1d(3, 128, 1)\n self.conv2 = nn.Conv1d(128, 256, 1)\n self.bn1 = nn.BatchNorm1d(128)\n self.bn2 = nn.BatchNorm1d(256)\n\n # second shared mlp\n self.conv3 = nn.Conv1d(512, 512, 1)\n self.conv4 = nn.Conv1d(512, 1024, 1)\n self.bn3 = nn.BatchNorm1d(512)\n self.bn4 = nn.BatchNorm1d(1024)\n \n def forward(self, x):\n n = x.size()[2]\n\n # first shared mlp\n x = F.relu(self.bn1(self.conv1(x))) # (B, 128, N)\n f = self.bn2(self.conv2(x)) # (B, 256, N)\n \n # point-wise maxpool\n g = torch.max(f, dim=2, keepdim=True)[0] # (B, 256, 1)\n \n # expand and concat\n x = torch.cat([g.repeat(1, 1, n), f], dim=1) # (B, 512, N)\n\n # second shared mlp\n x = F.relu(self.bn3(self.conv3(x))) # (B, 512, N)\n x = self.bn4(self.conv4(x)) # (B, 1024, N)\n \n # point-wise maxpool\n v = torch.max(x, dim=-1)[0] # (B, 1024)\n \n return v\n\n\nclass Decoder(nn.Module):\n def __init__(self, num_coarse=1024, num_dense=16384):\n super(Decoder, self).__init__()\n\n self.num_coarse = num_coarse\n \n # fully connected layers\n self.linear1 = nn.Linear(1024, 1024)\n self.linear2 = nn.Linear(1024, 1024)\n self.linear3 = nn.Linear(1024, 3 * num_coarse)\n self.bn1 = nn.BatchNorm1d(1024)\n self.bn2 = nn.BatchNorm1d(1024)\n\n # shared mlp\n self.conv1 = nn.Conv1d(3+2+1024, 512, 1)\n self.conv2 = nn.Conv1d(512, 512, 1)\n self.conv3 = nn.Conv1d(512, 3, 1)\n self.bn3 = nn.BatchNorm1d(512)\n self.bn4 = nn.BatchNorm1d(512)\n\n # 2D grid\n grids = np.meshgrid(np.linspace(-0.05, 0.05, 4, dtype=np.float32),\n np.linspace(-0.05, 0.05, 4, dtype=np.float32)) # (2, 4, 44)\n self.grids = torch.Tensor(grids).view(2, -1) # (2, 4, 4) -> (2, 16)\n \n def forward(self, x):\n b = x.size()[0]\n # global features\n v = x # (B, 1024)\n\n # fully connected layers to generate the coarse output\n x = F.relu(self.bn1(self.linear1(x)))\n x = F.relu(self.bn2(self.linear2(x)))\n x = self.linear3(x)\n y_coarse = x.view(-1, 3, self.num_coarse) # (B, 3, 1024)\n\n #repeated_centers = y_coarse.unsqueeze(3).repeat(1, 1, 1, 16).view(b, 3, -1) # (B, 3, 16x1024)\n #repeated_v = v.unsqueeze(2).repeat(1, 1, 16 * self.num_coarse) # (B, 1024, 16x1024)\n #grids = self.grids.to(x.device) # (2, 16)\n #grids = grids.unsqueeze(0).repeat(b, 1, self.num_coarse) # (B, 2, 16x1024)\n\n #x = torch.cat([repeated_v, grids, repeated_centers], dim=1) # (B, 2+3+1024, 16x1024)\n #x = F.relu(self.bn3(self.conv1(x)))\n #x = F.relu(self.bn4(self.conv2(x)))\n #x = self.conv3(x) # (B, 3, 16x1024)\n #y_detail = x + repeated_centers # (B, 3, 16x1024)\n\n return y_coarse\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self):\n super(AutoEncoder, self).__init__()\n\n self.encoder = Encoder()\n self.decoder = Decoder()\n self.load_state_dict(torch.load('../../../model/pcn.pth', map_location=lambda storage, loc: storage))\n\n def forward(self, x):\n with torch.no_grad():\n v = self.encoder(x)\n y_coarse = self.decoder(v)\n return v, y_coarse\n\n\n# if __name__ == \"__main__\":\n# pcs = torch.rand(16, 3, 2048)\n# encoder = Encoder()\n# v = encoder(pcs)\n# print(v.size())\n#\n# decoder = Decoder()\n# decoder(v)\n# y_c, y_d = decoder(v)\n# print(y_c.size(), y_d.size())\n#\n# ae = AutoEncoder()\n# v, y_coarse, y_detail = ae(pcs)\n# print(v.size(), y_coarse.size(), y_detail.size())\n" ]
[ [ "torch.nn.Linear", "torch.load", "torch.nn.BatchNorm1d", "torch.no_grad", "torch.nn.Conv1d", "torch.max", "numpy.linspace", "torch.Tensor" ] ]
uwmadison-chm/redcap-examples
[ "376d62cf18a7e76265fc251425d2bfae3a2c6521" ]
[ "download_longitudinal.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nDownloads the data for a longitudinal REDCap project, with each event as \na separate file. Also allows filtering by standard REDCap logic, so you can,\nfor example, exclude non-enrolled participants from your data.\n\nRequires an instrument_download_list_file, which is a CSV file containing\nthe fields \"instrument_name\" and \"download\". If download is not blank, the\ninstrument will be included in the download.\n\nOnly instruments belonging to the given event are downloaded in the event's\nfile, to keep file sizes down and column counts more manageable.\n\nThe API URL, API token, and filter logic are all passed using environment\nvariables: API_URL, API_TOK, and FILTER, respectively.\n\nRequires the PyCap library: https://pycap.readthedocs.io/en/latest/\n\"\"\"\n\nimport sys\nimport os\n\nimport redcap\nimport pandas as pd\n\nfrom pathlib import Path\n\nimport logging\nlogging.basicConfig(format='%(message)s')\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nAPI_URL = os.environ['REDCAP_API_URL']\nAPI_TOK = os.environ['REDCAP_API_TOKEN']\nFILTER = os.environ.get('REDCAP_FILTER_LOGIC')\nPROJ = redcap.Project(API_URL, API_TOK)\n\n\ndef filtered_ids():\n id_field = PROJ.field_names[0]\n first_event = PROJ.events[0]\n record_names = PROJ.export_records(\n fields=[id_field],\n filter_logic=FILTER,\n format='df'\n )\n logger.debug(record_names.index)\n return list(record_names.index.get_level_values(0))\n\n\ndef main(instrument_download_list_file, out_path):\n # We need this because we can't filter using data that doesn't occur\n # in the target event, because redcap is kinda dumb\n ids = filtered_ids()\n form_events = PROJ.export_fem(format='df')\n all_form_list = pd.read_csv(instrument_download_list_file)\n selected_forms = frozenset(all_form_list.dropna()['instrument_name'])\n logger.debug(f'Forms to download: {selected_forms}')\n\n for event_name, event_rows in form_events.groupby(by='unique_event_name'):\n available_forms = frozenset(event_rows['form'])\n download_forms = selected_forms & available_forms\n logger.debug(f'Event {event_name}: Downloading {download_forms}')\n data = PROJ.export_records(\n records=ids,\n events=[event_name],\n forms=download_forms,\n export_survey_fields=False,\n export_checkbox_labels=True,\n format='df',\n df_kwargs={\n 'dtype': 'str'\n }\n )\n out_filename = out_path / f'{event_name}.csv'\n data.to_csv(out_filename, index=False)\n\n\nif __name__ == \"__main__\":\n instrument_download_list_file = sys.argv[1]\n out_dir = sys.argv[2]\n out_path = Path(out_dir)\n main(instrument_download_list_file, out_path)\n" ]
[ [ "pandas.read_csv" ] ]
mayank42/Earthquake-Signal-Analysis-using-Deep-CNN
[ "fdeca035277acd1b37759c546c90bcdd63168412" ]
[ "pv.py" ]
[ "\"\"\" \npv.py\nPhase Vocoder implementation in Python\n\nThe MIT License (MIT)\n\nCopyright (c) 2015 multivac61\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom math import floor\n\n# CONSTANTS\nepsilon = sys.float_info.epsilon\n\n\nclass PhaseVocoder(object):\n\t\"\"\"docstring for PhaseVocoder\"\"\"\n\tdef __init__(self, N=2**12, M=2**12, Rs=(2**12/8), w=np.hanning(2**12), alpha=1):\n\t\tsuper(PhaseVocoder, self).__init__()\n\t\tself.N\t = N\t\t# FFT size\n\t\tself.M \t = M\t\t# Window size\n\t\tself.Rs \t= Rs \t# Synthesis hop size\n\t\tself.alpha = alpha\t# Timestretch factor\n\t\tself.w = w \t# Analysis/Synthesis window\n\n\tdef timestretch(self, x, alpha):\n\t\t\"\"\"\n\t\tPerform timestretch of a factor alpha to signal x\n\t\tx: input signal, alpha: timestrech factor\n\t\treturns: a signal of length T*alpha\n\t\t\"\"\"\n\t\t# Analysis/Synthesis window function\n\t\tw = self.w; N = self.N; M = self.M\n\t\thM1 = int(floor((M-1)/2.))\n\t\thM2 = int(floor(M/2.))\n\n\t\t# Synthesis and analysis hop sizes\n\t\tRs = self.Rs\n\t\tRa = int(self.Rs / float(alpha))\n\n\t\t# AM scaling factor due to window sliding\n\t\twscale = sum([i**2 for i in w]) / float(Rs)\n\t\tL = x.size\n\t\tL0 = int(x.size*alpha)\n\n\t\t# Get an prior approximation of the fundamental frequency\n\t\tif alpha != 1.0:\n\t\t\tA = np.fft.fft(w*x[0:N])\n\t\t\tB = np.fft.fft(w*x[Ra:Ra+N])\n\t\t\tA[A == 0] = epsilon\n\t\t\tB[B == 0] = epsilon\n\t\t\tFreq0 = B/A * abs(B/A)\n\t\t\tFreq0[Freq0 == 0] = epsilon\n\t\telse:\n\t\t\tFreq0 = 1\n\n\t\tif alpha == 1.0: \t# we can fully retrieve the input (within numerical errors)\n\t\t\t# Place input signal directly over half of window\n\t\t\tx = np.append(np.zeros(N+Rs), x)\n\t\t\tx = np.append(x, np.zeros(N+Rs))\n\n\t\t\t# Initialize output signal\n\t\t\ty = np.zeros(x.size)\n\t\telse:\n\t\t\tx = np.append(np.zeros(Rs), x)\n\t\t\t#x = np.append(x, np.zeros(Rs))\n\n\t\t\ty = np.zeros(int((x.size)*alpha + x.size/Ra * alpha))\n\n\t\t# Pointers and initializations\n\t\tp, pp = 0, 0\n\t\tpend = x.size - (Rs+N)\n\t\tYold = epsilon\n\n\t\ti = 0\n\t\twhile p <= pend:\n\t\t\ti += 1\n\t\t\t# Spectra of two consecutive windows\n\t\t\tXs = np.fft.fft(w*x[p:p+N])\n\t\t\tXt = np.fft.fft(w*x[p+Rs:p+Rs+N])\n\n\t\t\t# Prohibit dividing by zero\n\t\t\tXs[Xs == 0] = epsilon\n\t\t\tXt[Xt == 0] = epsilon\n\n\t\t\t# inverse FFT and overlap-add\n\t\t\tif p > 0 :\n\t\t\t\tY = Xt * (Yold / Xs) / abs(Yold / Xs)\n\t\t\telse:\n\t\t\t\tY = Xt * Freq0\n\n\t\t\tYold = Y\n\t\t\tYold[Yold 
== 0] = epsilon\n\t\t\t\n\n\t\t\ty[pp:pp+N] += np.array([c.real for c in w*np.fft.ifft(Y)])\n\t\t\t\n\t\t\tp = int(p+Ra)\t\t# analysis hop\n\t\t\tpp += Rs\t\t\t# synthesis hop\n\n\t\t\t#sys.stdout.write (\"Percentage finishied: %d %% \\r\" % int(100.0*p/pend))\n\t\t\t#sys.stdout.flush()\n\n\t\ty = y / wscale\n\n\n\t\tif self.alpha == 1.0:\n\t\t\t# retrieve input signal perfectly\n\t\t\tx = np.delete(x, range(N+Rs))\n\t\t\tx = np.delete(x, range(x.size-(N+Rs), x.size))\n\t\t\t\t\t\t\n\t\t\ty = np.delete(y, range(N))\n\t\t\ty = np.delete(y, range(y.size-(N+2*Rs), y.size))\n\t\telse:\n\t\t\t# retrieve input signal perfectly\n\t\t\tx = np.delete(x, range(Rs))\n\n\t\t\ty = np.delete(y, range(Rs))\n\t\t\ty = np.delete(y, range(L0, y.size))\n\t\t\t\t\t\t\t\n\t\treturn y\n" ]
[ [ "numpy.fft.ifft", "numpy.fft.fft", "numpy.hanning", "numpy.zeros" ] ]
KleinYuan/tf-tailor
[ "70e85643a49ad4484ef856c22a0de2ae8a5a00e9" ]
[ "models/tf_server.py" ]
[ "import tensorflow as tf\n\n\nclass TFServer(object):\n\n\tdef __init__(self, config):\n\t\ttf.reset_default_graph()\n\t\tself.in_progress = False\n\t\tself.prediction = None\n\t\tself.session = None\n\t\tself.graph = None\n\t\tself.frozen = False\n\t\tself.feed_dict = {}\n\t\tself.output_ops = []\n\t\tself.input_ops = []\n\t\tself.model_fp = config.model_fp\n\t\tself.input_tensor_names = config.input_tensor_names\n\t\tself.output_tensor_names = config.output_tensor_names\n\t\tself.device = config.device\n\n\t\twith tf.device(self.device):\n\t\t\tself._load_graph()\n\t\t\tself._init_predictor()\n\n\tdef _load_graph(self):\n\t\tself.graph = tf.Graph()\n\t\twith self.graph.as_default():\n\t\t\tod_graph_def = tf.GraphDef()\n\t\t\twith tf.gfile.GFile(self.model_fp, 'rb') as fid:\n\t\t\t\tserialized_graph = fid.read()\n\t\t\t\tod_graph_def.ParseFromString(serialized_graph)\n\t\t\t\ttf.import_graph_def(od_graph_def, name='')\n\t\ttf.get_default_graph().finalize()\n\n\tdef _init_predictor(self):\n\t\ttf_config = tf.ConfigProto()\n\t\ttf_config.gpu_options.allow_growth = True\n\t\twith self.graph.as_default():\n\t\t\tself.session = tf.Session(config=tf_config, graph=self.graph)\n\t\t\tself._fetch_tensors()\n\n\tdef _fetch_tensors(self):\n\t\tassert len(self.input_tensor_names) > 0\n\t\tassert len(self.output_tensor_names) > 0\n\t\tfor _tensor_name in self.input_tensor_names:\n\t\t\t_op = self.graph.get_tensor_by_name(_tensor_name)\n\t\t\tself.input_ops.append(_op)\n\t\t\tself.feed_dict[_op] = None\n\t\tfor _tensor_name in self.output_tensor_names:\n\t\t\t_op = self.graph.get_tensor_by_name(_tensor_name)\n\t\t\tself.output_ops.append(_op)\n\n\tdef _set_feed_dict(self, data):\n\t\tassert len(data) == len(self.input_ops), 'Data len = {} | Input Ops len = {}'.format(len(data), len(self.input_ops))\n\t\twith self.graph.as_default():\n\t\t\tfor ind, op in enumerate(self.input_ops):\n\t\t\t\tself.feed_dict[op] = data[ind]\n\n\tdef inference(self, data):\n\t\tself.in_progress = True\n\n\t\twith self.graph.as_default():\n\t\t\tself._set_feed_dict(data=data)\n\t\t\tself.prediction = self.session.run(self.output_ops, feed_dict=self.feed_dict)\n\t\tself.in_progress = False\n\n\t\treturn self.prediction\n\n\tdef get_status(self):\n\t\treturn self.in_progress\n\n\tdef clean_up(self):\n\t\t# In old version tensorflow\n\t\t# session sometimes will not be closed automatically\n\t\tself.session.close()\n\t\tself.session = None\n\t\ttf.reset_default_graph()" ]
[ [ "tensorflow.gfile.GFile", "tensorflow.device", "tensorflow.Graph", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.get_default_graph", "tensorflow.reset_default_graph", "tensorflow.ConfigProto", "tensorflow.GraphDef" ] ]
RajaSudalai/Motorcyclist_Helmet_Detection
[ "5f739115ed1453ce01a51204219abe080e159761" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
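        # Illustrative sketch (comments only, not executed): with this
        # repo's norm_boxes convention, a pixel window is shifted by
        # (0, 0, 1, 1) and divided by (H - 1, W - 1). For an unpadded
        # 1024x1024 input:
        #   window = np.array([0, 0, 1024, 1024])
        #   normalized = (window - np.array([0, 0, 1, 1])) / 1023.0
        #   # -> [0., 0., 1., 1.]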
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
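
    Illustrative shape check (assumed toy values, not part of the contract):
    for a single 32x32 feature map with anchors_per_location=3, the outputs
    below have 32 * 32 * 3 = 3072 anchors along axis 1.
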
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
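    # Sanity sketch for smooth_l1_loss above (toy numbers, not executed):
    #   diff = 0.5 -> loss = 0.5 * 0.5**2 = 0.125   (quadratic zone)
    #   diff = 2.0 -> loss = 2.0 - 0.5    = 1.5     (linear zone)
    # The switch at |diff| = 1 keeps gradients bounded for large errors.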
    # Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss


def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois].
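
    Sketch of the class-specific gather used in the body (toy shapes, an
    illustration rather than part of the interface): pred_bbox is reshaped
    to [num_rois, num_classes, 4]; for ROI i with GT class c, the index row
    (i, c) selects pred_bbox[i, c, :] via tf.gather_nd, so only the deltas
    of the true class are penalized.
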
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
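
    Back-of-the-envelope memory comparison (assuming 1 byte per bool):
    100 full masks at 1024x1024 take roughly 105 MB, while 100 mini masks
    at 224x224 take roughly 5 MB.
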
Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
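    # Illustrative augmentation call (hedged sketch; the imgaug names are
    # real, the pipeline shown is just an example):
    #   import imgaug.augmenters as iaa
    #   augmentation = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
    #   image, image_meta, class_ids, bbox, mask = load_image_gt(
    #       dataset, config, image_id, augmentation=augmentation)
    # The MASK_AUGMENTERS hook above ensures only the whitelisted,
    # mask-safe augmenters are applied to the masks.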
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
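    # Numeric sketch of the subsampling above (default config values,
    # illustration only): with TRAIN_ROIS_PER_IMAGE=200 and
    # ROI_POSITIVE_RATIO=0.33,
    #   fg_roi_count = int(200 * 0.33)  # -> at most 66 foreground ROIs
    #   remaining    = 200 - 66         # -> 134 slots filled from bg_ids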
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
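    # Toy illustration of the thresholds (comments only, not executed): an
    # anchor whose max IoU is 0.85 becomes positive, 0.15 becomes negative,
    # and 0.55 stays neutral unless it happens to be the best anchor for
    # some GT box.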
    # Instead, match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU, match all of them.
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])

        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. If we get fewer valid boxes
        # than we need, we loop and try again.
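        # e.g. with rois_per_box = 9 the loop below draws 18 candidate
        # (y1, y2) and (x1, x2) pairs, keeps the first 9 whose side length
        # is >= 1 px, and retries in the rare case fewer than 9 survive.
        # (Illustrative numbers only.)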
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois


def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
        network classifier and mask heads. Useful if training
        the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
        defined in the Dataset class.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details.
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
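            # Usage sketch (hedged; the variable names are assumptions):
            #   train_gen = data_generator(dataset_train, config, shuffle=True,
            #                              batch_size=config.BATCH_SIZE)
            #   inputs, outputs = next(train_gen)
            # For the default 1024x1024 setup, the anchors array built above
            # has 3 * (256**2 + 128**2 + 64**2 + 32**2 + 16**2) = 261,888 rows.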
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
                raise


############################################################
#  MaskRCNN Class
############################################################

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A subclass of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)

    def build(self, mode, config):
        """Build Mask R-CNN architecture.
        input_shape: The shape of the input image.
        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Stage 5 (C5) is created as well (stage5=True) because the FPN
        # top-down pathway starts from it.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g.
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
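            # Aside: the earlier zip(*layer_outputs) trick, in miniature
            # (illustration only; a1, b1, etc. are placeholder names):
            #   list(zip(*[[a1, b1], [a2, b2]])) == [(a1, a2), (b1, b2)]
            # i.e. per-level output triplets become per-output tuples that
            # are then concatenated across pyramid levels.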
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
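        # Typical call (hedged example; the excluded names are the usual
        # head layers to re-initialize when fine-tuning on a new dataset):
        #   model.load_weights(weights_path, by_name=True,
        #                      exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
        #                               "mrcnn_bbox", "mrcnn_mask"])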
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
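        # Example patterns (hedged; "heads" reflects this codebase's
        # rpn_*/fpn_*/mrcnn_* layer-naming convention):
        #   self.set_trainable(r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)")  # heads only
        #   self.set_trainable(".*")                                # everything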
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from where we left off. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done already, so this actually determines\n the epochs to train in total rather than in this particular\n call.\n layers: Allows selecting which layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n\t custom_callbacks: Optional. Add custom callbacks to be called\n\t with the keras fit_generator method. Must be list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expects inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebooks are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks a different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "numpy.sum", "numpy.ones", "tensorflow.sparse_tensor_to_dense", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.gather_nd", "tensorflow.round", "tensorflow.nn.top_k", "numpy.any", "tensorflow.logical_and", "numpy.copy", "tensorflow.squeeze", "tensorflow.abs", "numpy.log", "tensorflow.concat", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.identity", "numpy.amax", "numpy.stack", "tensorflow.Variable", "tensorflow.split", "tensorflow.reduce_sum", "tensorflow.divide", "tensorflow.minimum", "numpy.concatenate", "numpy.reshape", "numpy.fliplr", "numpy.random.choice", "tensorflow.multiply", "numpy.abs", "numpy.expand_dims", "numpy.delete", "tensorflow.constant", "tensorflow.transpose", "numpy.where", "tensorflow.stack", "tensorflow.shape", "numpy.zeros", "tensorflow.add_n", "tensorflow.expand_dims", "tensorflow.random_shuffle", "numpy.argmax", "numpy.arange", "tensorflow.cast", "numpy.hstack", "numpy.max", "tensorflow.boolean_mask", "numpy.sort", "numpy.broadcast_to", "tensorflow.control_dependencies", "tensorflow.pad", "tensorflow.size", "tensorflow.equal", "numpy.divide", "numpy.empty", "numpy.random.shuffle", "tensorflow.map_fn", "tensorflow.range", "tensorflow.sqrt", "tensorflow.reduce_mean", "tensorflow.stop_gradient", "tensorflow.unique", "tensorflow.exp", "tensorflow.image.non_max_suppression", "tensorflow.image.crop_and_resize", "tensorflow.where", "tensorflow.argmax", "numpy.array", "tensorflow.log", "tensorflow.gather", "numpy.random.randint", "tensorflow.maximum" ] ]
leo6033/Graduation-Project
[ "c1cf68edaffc346b37ac6e615d580cd05c4f0711" ]
[ "TextCNN.py" ]
[ "\"\"\"\n@Description: TextCNN 网络\n@Author: 吕明伟\n@Date: 2021-4-6\n\"\"\"\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import Embedding, Dense, Conv1D, GlobalMaxPooling1D, Concatenate, Dropout\n\nclass TextCNN(object):\n def __init__(self, maxlen, max_features, embedding_dims,\n class_num=5,\n last_activation='softmax'):\n self.maxlen = maxlen\n self.max_features = max_features\n self.embedding_dims = embedding_dims\n self.class_num = class_num\n self.last_activation = last_activation\n\n def get_model(self):\n input = Input((self.maxlen,))\n embedding = Embedding(self.max_features, self.embedding_dims, input_length=self.maxlen, mask_zero=True)(input)\n convs = []\n for kernel_size in [3, 4, 5]:\n c = Conv1D(128, kernel_size, activation='relu')(embedding)\n c = GlobalMaxPooling1D()(c)\n convs.append(c)\n x = Concatenate()(convs)\n\n output = Dense(self.class_num, activation=self.last_activation)(x)\n model = Model(inputs=input, outputs=output)\n return model" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.GlobalMaxPooling1D", "tensorflow.keras.Input" ] ]
jtapanes21/RADGIS
[ "2322f75f23cec4dde9f8c7b21d9137f1986e6382" ]
[ "RADGIS/preprocessing/knn.py" ]
[ "from sklearn.neighbors import BallTree\nimport numpy as np\n\n'''\nReturns the KNN and distance in meters to the KNN.\n\nParameters\n ----------\n left_gdf : GeoDataFrame\n the target geodataframe; all columns are kept.\n \n right_gdf : GeoDataFrame\n the geodataframe that is the subject of the measurement.\n \n keep_columns : list of strings\n the columns in the right_gdf that are kept.\n \n return_dist : boolean, optional\n if True, the distance in meters is added.\n Returns\n -------\n \n GeoDataFrame\n a GeoDataFrame with all of the columns from the left_gdf and only\n the columns from the right_gdf specified in keep_columns parameter.\n\n\nTaken from https://automating-gis-processes.github.io/site/notebooks/L3/nearest-neighbor-faster.html#Efficient-nearest-neighbor-search-with-Geopandas-and-scikit-learn\n\n\n'''\n\ndef _get_nearest(src_points, candidates, k_neighbors=1):\n \"\"\"Find nearest neighbors for all source points from a set of candidate points\"\"\"\n\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n\n # Find closest points and distances\n distances, indices = tree.query(src_points, k=k_neighbors)\n\n # Transpose to get distances and indices into arrays\n distances = distances.transpose()\n indices = indices.transpose()\n\n # Get closest indices and distances (i.e. array at index 0)\n # note: for the second closest points, you would take index 1, etc.\n closest = indices[0]\n closest_dist = distances[0]\n\n # Return indices and distances\n return (closest, closest_dist)\n\n\n\n\ndef _complete_neighbor(left_gdf, right_gdf, return_dist):\n \"\"\"\n For each point in left_gdf, find closest point in right GeoDataFrame and return them.\n\n NOTICE: Assumes that the input Points are in WGS84 projection (lat/lon).\n \"\"\"\n\n left_geom_col = left_gdf.geometry.name\n right_geom_col = right_gdf.geometry.name\n\n # Ensure that index in right gdf is formed of sequential numbers\n right = right_gdf.copy().reset_index(drop=True)\n\n # Parse coordinates from points and insert them into a numpy array as RADIANS\n left_radians = np.array(left_gdf[left_geom_col].apply(lambda geom: (geom.x * np.pi / 180, geom.y * np.pi / 180)).to_list())\n right_radians = np.array(right[right_geom_col].apply(lambda geom: (geom.x * np.pi / 180, geom.y * np.pi / 180)).to_list())\n\n # Find the nearest points\n # -----------------------\n # closest ==> index in right_gdf that corresponds to the closest point\n # dist ==> distance between the nearest neighbors (in meters)\n\n closest, dist = _get_nearest(src_points=left_radians, candidates=right_radians)\n\n # Return points from right GeoDataFrame that are closest to points in left GeoDataFrame\n closest_points = right.loc[closest]\n\n # Ensure that the index corresponds the one in left_gdf\n closest_points = closest_points.reset_index(drop=True)\n\n # Add distance if requested\n if return_dist:\n # Convert to meters from radians\n earth_radius = 6371000 # meters\n closest_points['distance'] = dist * earth_radius\n\n return closest_points\n\ndef nearest_neighbor(left_gdf, right_gdf, keep_columns, return_dist=False):\n keep_columns.append(\"distance\")\n knn = _complete_neighbor(left_gdf, right_gdf, return_dist=return_dist)\n knn = knn[keep_columns]\n knn_join = left_gdf.join(knn.add_suffix(\"_knn\"))\n return knn_join\n \n \n \n " ]
[ [ "sklearn.neighbors.BallTree" ] ]
bianxg/BackgroundMattingV2
[ "af15097de99564f5042121601abe4050cc2e3c2e" ]
[ "collect_env.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\r\nimport importlib\r\nimport numpy as np\r\nimport os\r\nimport re\r\nimport subprocess\r\nimport sys\r\nfrom collections import defaultdict\r\nimport PIL\r\nimport torch\r\nimport torchvision\r\nfrom tabulate import tabulate\r\n\r\n__all__ = [\"collect_env_info\"]\r\n\r\n\r\ndef collect_torch_env():\r\n try:\r\n import torch.__config__\r\n\r\n return torch.__config__.show()\r\n except ImportError:\r\n # compatible with older versions of pytorch\r\n from torch.utils.collect_env import get_pretty_env_info\r\n\r\n return get_pretty_env_info()\r\n\r\n\r\ndef get_env_module():\r\n var_name = \"DETECTRON2_ENV_MODULE\"\r\n return var_name, os.environ.get(var_name, \"<not set>\")\r\n\r\n\r\ndef detect_compute_compatibility(CUDA_HOME, so_file):\r\n try:\r\n cuobjdump = os.path.join(CUDA_HOME, \"bin\", \"cuobjdump\")\r\n if os.path.isfile(cuobjdump):\r\n output = subprocess.check_output(\r\n \"'{}' --list-elf '{}'\".format(cuobjdump, so_file), shell=True\r\n )\r\n output = output.decode(\"utf-8\").strip().split(\"\\n\")\r\n arch = []\r\n for line in output:\r\n line = re.findall(r\"\\.sm_([0-9]*)\\.\", line)[0]\r\n arch.append(\".\".join(line))\r\n arch = sorted(set(arch))\r\n return \", \".join(arch)\r\n else:\r\n return so_file + \"; cannot find cuobjdump\"\r\n except Exception:\r\n # unhandled failure\r\n return so_file\r\n\r\n\r\ndef collect_env_info():\r\n has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM\r\n torch_version = torch.__version__\r\n\r\n # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional\r\n from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME\r\n\r\n has_rocm = False\r\n if (getattr(torch.version, \"hip\", None) is not None) and (ROCM_HOME is not None):\r\n has_rocm = True\r\n has_cuda = has_gpu and (not has_rocm)\r\n\r\n data = []\r\n data.append((\"sys.platform\", sys.platform)) # check-template.yml depends on it\r\n data.append((\"Python\", sys.version.replace(\"\\n\", \"\")))\r\n data.append((\"numpy\", np.__version__))\r\n\r\n try:\r\n import detectron2 # noqa\r\n\r\n data.append(\r\n (\"detectron2\", detectron2.__version__ + \" @\" + os.path.dirname(detectron2.__file__))\r\n )\r\n except ImportError:\r\n data.append((\"detectron2\", \"failed to import\"))\r\n\r\n try:\r\n import detectron2._C as _C\r\n except ImportError as e:\r\n data.append((\"detectron2._C\", f\"not built correctly: {e}\"))\r\n\r\n # print system compilers when extension fails to build\r\n if sys.platform != \"win32\": # don't know what to do for windows\r\n try:\r\n # this is how torch/utils/cpp_extensions.py choose compiler\r\n cxx = os.environ.get(\"CXX\", \"c++\")\r\n cxx = subprocess.check_output(\"'{}' --version\".format(cxx), shell=True)\r\n cxx = cxx.decode(\"utf-8\").strip().split(\"\\n\")[0]\r\n except subprocess.SubprocessError:\r\n cxx = \"Not found\"\r\n data.append((\"Compiler ($CXX)\", cxx))\r\n\r\n if has_cuda and CUDA_HOME is not None:\r\n try:\r\n nvcc = os.path.join(CUDA_HOME, \"bin\", \"nvcc\")\r\n nvcc = subprocess.check_output(\"'{}' -V\".format(nvcc), shell=True)\r\n nvcc = nvcc.decode(\"utf-8\").strip().split(\"\\n\")[-1]\r\n except subprocess.SubprocessError:\r\n nvcc = \"Not found\"\r\n data.append((\"CUDA compiler\", nvcc))\r\n if has_cuda and sys.platform != \"win32\":\r\n try:\r\n so_file = importlib.util.find_spec(\"detectron2._C\").origin\r\n except ImportError:\r\n pass\r\n else:\r\n data.append(\r\n (\"detectron2 arch flags\", 
detect_compute_compatibility(CUDA_HOME, so_file))\r\n )\r\n else:\r\n # print compilers that are used to build extension\r\n data.append((\"Compiler\", _C.get_compiler_version()))\r\n data.append((\"CUDA compiler\", _C.get_cuda_version())) # cuda or hip\r\n if has_cuda and getattr(_C, \"has_cuda\", lambda: True)():\r\n data.append(\r\n (\"detectron2 arch flags\", detect_compute_compatibility(CUDA_HOME, _C.__file__))\r\n )\r\n\r\n data.append(get_env_module())\r\n data.append((\"PyTorch\", torch_version + \" @\" + os.path.dirname(torch.__file__)))\r\n data.append((\"PyTorch debug build\", torch.version.debug))\r\n\r\n data.append((\"GPU available\", has_gpu))\r\n if has_gpu:\r\n devices = defaultdict(list)\r\n for k in range(torch.cuda.device_count()):\r\n cap = \".\".join((str(x) for x in torch.cuda.get_device_capability(k)))\r\n name = torch.cuda.get_device_name(k) + f\" (arch={cap})\"\r\n devices[name].append(str(k))\r\n for name, devids in devices.items():\r\n data.append((\"GPU \" + \",\".join(devids), name))\r\n\r\n if has_rocm:\r\n msg = \" - invalid!\" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else \"\"\r\n data.append((\"ROCM_HOME\", str(ROCM_HOME) + msg))\r\n else:\r\n msg = \" - invalid!\" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else \"\"\r\n data.append((\"CUDA_HOME\", str(CUDA_HOME) + msg))\r\n\r\n cuda_arch_list = os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None)\r\n if cuda_arch_list:\r\n data.append((\"TORCH_CUDA_ARCH_LIST\", cuda_arch_list))\r\n data.append((\"Pillow\", PIL.__version__))\r\n\r\n try:\r\n data.append(\r\n (\r\n \"torchvision\",\r\n str(torchvision.__version__) + \" @\" + os.path.dirname(torchvision.__file__),\r\n )\r\n )\r\n if has_cuda:\r\n try:\r\n torchvision_C = importlib.util.find_spec(\"torchvision._C\").origin\r\n msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)\r\n data.append((\"torchvision arch flags\", msg))\r\n except ImportError:\r\n data.append((\"torchvision._C\", \"Not found\"))\r\n except AttributeError:\r\n data.append((\"torchvision\", \"unknown\"))\r\n\r\n try:\r\n import fvcore\r\n\r\n data.append((\"fvcore\", fvcore.__version__))\r\n except ImportError:\r\n pass\r\n\r\n try:\r\n import iopath\r\n\r\n data.append((\"iopath\", iopath.__version__))\r\n except (ImportError, AttributeError):\r\n pass\r\n\r\n try:\r\n import cv2\r\n\r\n data.append((\"cv2\", cv2.__version__))\r\n except ImportError:\r\n data.append((\"cv2\", \"Not found\"))\r\n env_str = tabulate(data) + \"\\n\"\r\n env_str += collect_torch_env()\r\n return env_str\r\n\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n from detectron2.utils.collect_env import collect_env_info as f\r\n\r\n print(f())\r\n except ImportError:\r\n print(collect_env_info())\r\n\r\n if torch.cuda.is_available():\r\n for k in range(torch.cuda.device_count()):\r\n device = f\"cuda:{k}\"\r\n try:\r\n x = torch.tensor([1, 2.0], dtype=torch.float32)\r\n x = x.to(device)\r\n except Exception as e:\r\n print(\r\n f\"Unable to copy tensor to device={device}: {e}. \"\r\n \"Your CUDA environment is broken.\"\r\n )\r\n" ]
[ [ "torch.utils.collect_env.get_pretty_env_info", "torch.tensor", "torch.cuda.device_count", "torch.cuda.is_available", "torch.__config__.show", "torch.cuda.get_device_capability", "torch.cuda.get_device_name" ] ]
ysy9893/object_recognition_with_hand_gesture
[ "fc43211a1e6fe8a19f726f156cf276a8acdcb246" ]
[ "motpy_edit/tracker.py" ]
[ "import uuid\nfrom collections.abc import Iterable\nfrom typing import Any, Optional, Sequence, Union\n\nimport numpy as np\nimport scipy\nfrom filterpy.kalman import KalmanFilter\n\nfrom motpy.core import Box, Detection, Track, Vector, setup_logger\nfrom motpy.metrics import angular_similarity, calculate_iou\nfrom motpy.model import Model, ModelPreset\n\nlogger = setup_logger(__name__)\n\n\ndef get_single_object_tracker(model: Model, x0: Optional[Vector] = None) -> KalmanFilter:\n \"\"\" returns Kalman-based tracker based on a specified motion model spec.\n e.g. for spec = {'order_pos': 1, 'dim_pos': 2, 'order_size': 0, 'dim_size': 1}\n we expect the following setup:\n state x, x', y, y', w, h\n where x and y are centers of boxes\n w and h are width and height\n \"\"\"\n\n tracker = KalmanFilter(dim_x=model.state_length,\n dim_z=model.measurement_length)\n tracker.F = model.build_F()\n tracker.Q = model.build_Q()\n tracker.H = model.build_H()\n tracker.R = model.build_R()\n tracker.P = model.build_P()\n\n if x0 is not None:\n tracker.x = x0\n\n return tracker\n\n\nDEFAULT_MODEL_SPEC = ModelPreset.constant_velocity_and_static_box_size_2d.value\n\n\ndef exponential_moving_average_fn(gamma: float) -> Any:\n def fn(old, new):\n if new is None:\n return old\n\n if isinstance(new, Iterable):\n new = np.array(new)\n\n if old is None:\n return new # first call\n else:\n return gamma * old + (1 - gamma) * new\n\n return fn\n\n\nclass Tracker:\n def __init__(\n self,\n model_spec: dict = DEFAULT_MODEL_SPEC,\n dt: float = 1 / 24,\n x0: Optional[Vector] = None,\n box0: Optional[Box] = None,\n score0: float=None,\n class0: float=None,\n max_staleness: float = 12.0,\n smooth_score_gamma: float = 0.8,\n smooth_feature_gamma: float = 0.9):\n self.id = str(uuid.uuid4())\n self.model_spec = model_spec\n\n self.steps_alive = 1\n self.steps_positive = 1\n self.staleness = 0.0\n self.max_staleness = max_staleness\n\n self.update_score_fn = exponential_moving_average_fn(smooth_score_gamma)\n self.update_feature_fn = exponential_moving_average_fn(smooth_feature_gamma)\n\n self.score = score0\n self.feature = None\n self.cl=class0\n\n logger.debug(\n 'creating new object tracker with %s and id %s' % (self.model_spec, self.id))\n\n self.model = Model(dt=dt, **self.model_spec)\n\n if x0 is None:\n x0 = self.model.box_to_x(box0)\n\n self._tracker = get_single_object_tracker(model=self.model, x0=x0)\n\n def predict(self):\n self.steps_alive += 1\n self._tracker.predict()\n\n def update(self, detection: Detection):\n self.steps_positive += 1\n\n # KF tracker update for position and size\n z = self.model.box_to_z(detection.box)\n self._tracker.update(z)\n \n self.cl=detection.cl\n\n self.score = self.update_score_fn(old=self.score, new=detection.score)\n #self.feature = self.update_feature_fn(old=self.feature, new=detection.feature)\n\n\n # reduce the staleness of a tracker, faster than growth rate\n self.unstale(rate=3)\n\n def stale(self, rate: float = 1.0):\n self.staleness += 4.5\n return self.staleness\n\n def unstale(self, rate: float = 2.0):\n self.staleness = max(0, self.staleness - rate)\n return self.staleness\n\n @property\n def is_stale(self) -> bool:\n return self.staleness >= self.max_staleness\n\n @property\n def is_invalid(self) -> bool:\n try:\n has_nans = any(np.isnan(self._tracker.x))\n return has_nans\n except Exception as e:\n logger.warning('invalid tracker, exception: %s' % str(e))\n return True\n\n @property\n def box(self):\n return self.model.x_to_box(self._tracker.x)\n\n def 
__repr__(self):\n fmt = \"box: %s\\tstaleness: %d\"\n return fmt % (self.box, self.staleness)\n\n\n\"\"\" assignment cost calculation & matching methods \"\"\"\n\n\ndef match_by_cost_matrix(trackers: Sequence[Tracker],\n detections: Sequence[Detection],\n min_iou: float = 0.1,\n **kwargs) -> np.ndarray:\n if len(trackers) == 0 or len(detections) == 0:\n return []\n\n cost_mat, iou_mat = cost_matrix_iou_feature(trackers, detections, **kwargs)\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(cost_mat)\n\n # filter out low IOU matches\n ret = [[r, c] for r, c in zip(row_ind, col_ind) if iou_mat[r, c] >= min_iou]\n return np.array(ret)\n\n\ndef _sequence_has_none(seq: Sequence[Any]) -> bool:\n return any(r is None for r in seq)\n\n\ndef cost_matrix_iou_feature(trackers: Sequence[Tracker],\n detections: Sequence[Detection],\n feature_similarity_fn=angular_similarity,\n feature_similarity_beta: float = None):\n\n # boxes\n b1 = np.array([t.box for t in trackers])\n b2 = np.array([d.box for d in detections])\n\n # box iou\n inferred_dim = int(len(b1[0]) / 2)\n iou_mat = calculate_iou(b1, b2, dim=inferred_dim)\n\n # feature similarity\n if feature_similarity_beta is not None:\n # get features\n f1 = [t.feature for t in trackers]\n f2 = [d.feature for d in detections]\n\n if _sequence_has_none(f1) or _sequence_has_none(f2):\n # fallback to pure IOU due to missing features\n apt_mat = iou_mat\n else:\n sim_mat = feature_similarity_fn(f1, f2)\n sim_mat = feature_similarity_beta + (1 - feature_similarity_beta) * sim_mat\n\n # combined aptitude\n apt_mat = np.multiply(iou_mat, sim_mat)\n else:\n apt_mat = iou_mat\n\n cost_mat = -1.0 * apt_mat\n return cost_mat, iou_mat\n\n\nclass MatchingFunction:\n def __call__(self,\n trackers: Sequence[Tracker],\n detections: Sequence[Detection]) -> np.ndarray:\n raise NotImplementedError()\n\n\nclass BasicMatchingFunction(MatchingFunction):\n \"\"\" class implements the most basic matching function, taking\n detection boxes and optional feature similarity into account \"\"\"\n\n def __init__(self, min_iou: float = 0.1,\n feature_similarity_fn=angular_similarity,\n feature_similarity_beta: Optional[float] = None) -> None:\n\n self.min_iou = min_iou\n self.feature_similarity_fn = feature_similarity_fn\n self.feature_similarity_beta = feature_similarity_beta\n\n def __call__(self,\n trackers: Sequence[Tracker],\n detections: Sequence[Detection]) -> np.ndarray:\n return match_by_cost_matrix(\n trackers, detections,\n self.min_iou,\n feature_similarity_fn=self.feature_similarity_fn,\n feature_similarity_beta=self.feature_similarity_beta)\n\n\nclass MultiObjectTracker:\n def __init__(self, dt: float,\n model_spec: Union[str, dict] = DEFAULT_MODEL_SPEC,\n matching_fn: Optional[MatchingFunction] = None,\n tracker_kwargs: dict = None,\n matching_fn_kwargs: dict = None,\n active_tracks_kwargs: dict = None) -> None:\n \"\"\"\n model_spec specifies the dimension and order for position and size of the object\n matching_fn determines the strategy on which the trackers and detections are assigned.\n\n tracker_kwargs are passed to each single object tracker\n active_tracks_kwargs limits surfacing of fresh/fading out tracks\n \"\"\"\n\n self.dt = dt\n self.trackers = []\n\n if isinstance(model_spec, dict):\n self.model_spec = model_spec\n elif isinstance(model_spec, str) and model_spec in ModelPreset.__members__:\n self.model_spec = ModelPreset[model_spec].value\n else:\n raise NotImplementedError('unsupported motion model %s' % str(model_spec))\n 
logger.debug('using model spec: %s' % str(self.model_spec))\n\n self.matching_fn = matching_fn\n self.matching_fn_kwargs = matching_fn_kwargs if matching_fn_kwargs is not None else {}\n if self.matching_fn is None:\n self.matching_fn = BasicMatchingFunction(**self.matching_fn_kwargs)\n\n # kwargs to be passed to each single object tracker\n self.tracker_kwargs = tracker_kwargs if tracker_kwargs is not None else {}\n logger.debug('using tracker_kwargs: %s' % str(self.tracker_kwargs))\n\n # kwargs to be used when self.step returns active tracks\n self.active_tracks_kwargs = active_tracks_kwargs if active_tracks_kwargs is not None else {}\n logger.debug('using active_tracks_kwargs: %s' % str(self.active_tracks_kwargs))\n\n def active_tracks(self,\n max_staleness_to_positive_ratio: float = 3.0,\n max_staleness: float = 999,\n min_steps_alive: int = -1) -> Sequence[Track]:\n \"\"\" returns all active tracks after optional filtering by tracker steps count and staleness \"\"\"\n\n tracks = []\n for tracker in self.trackers:\n cond1 = tracker.staleness / tracker.steps_positive < max_staleness_to_positive_ratio # early stage\n cond2 = tracker.staleness < max_staleness\n cond3 = tracker.steps_alive >= min_steps_alive\n if cond1 and cond2 and cond3:\n tracks.append(Track(id=tracker.id, box=tracker.box, score=tracker.score,\n cl=tracker.cl))\n\n logger.debug('active/all tracks: %d/%d' % (len(self.trackers), len(tracks)))\n return tracks\n\n def cleanup_trackers(self) -> None:\n count_before = len(self.trackers)\n self.trackers = [t for t in self.trackers if not (t.is_stale or t.is_invalid)]\n count_after = len(self.trackers)\n logger.debug('deleted %s/%s trackers' % (count_before - count_after, count_before))\n\n def step(self, detections: Sequence[Detection]) -> Sequence[Track]:\n \"\"\" the method matches the new detections with existing trackers,\n creates new trackers if necessary and performs the cleanup.\n Returns the active tracks after active filtering applied \"\"\"\n\n # Drop detections with an empty bbox\n detections = [det for det in detections if det.box is not None]\n\n logger.debug('step with %d detections' % len(detections))\n # Match previously tracked objects with newly detected objects\n matches = self.matching_fn(self.trackers, detections)\n logger.debug('matched %d pairs' % len(matches))\n\n # all trackers: predict\n for t in self.trackers:\n t.predict() # advance each tracker one step with its motion model\n\n # assigned trackers: correct\n for match in matches:\n track_idx, det_idx = match[0], match[1]\n self.trackers[track_idx].update(detection=detections[det_idx])\n\n # not assigned detections: create new trackers\n assigned_det_idxs = set(matches[:, 1]) if len(matches) > 0 else []\n for det_idx in set(range(len(detections))).difference(assigned_det_idxs):\n tracker = Tracker(box0=detections[det_idx].box, score0=detections[det_idx].score,\n class0=detections[det_idx].cl,\n model_spec=self.model_spec,\n **self.tracker_kwargs)\n self.trackers.append(tracker)\n\n # unassigned trackers\n assigned_track_idxs = set(matches[:, 0]) if len(matches) > 0 else []\n for track_idx in set(range(len(self.trackers))).difference(assigned_track_idxs):\n self.trackers[track_idx].stale()\n\n # cleanup dead trackers\n self.cleanup_trackers()\n\n return self.active_tracks(**self.active_tracks_kwargs)\n\n" ]
[ [ "numpy.array", "numpy.multiply", "scipy.optimize.linear_sum_assignment", "numpy.isnan" ] ]
borisbolliet/CCL
[ "6ddd35e49f9d2968cef3d3bc1bac8b55dbb4cf91" ]
[ "pyccl/tracers.py" ]
[ "from . import ccllib as lib\nfrom .core import check\nfrom .background import comoving_radial_distance, growth_rate, \\\n growth_factor, scale_factor_of_chi\nfrom .pyutils import _check_array_params, NoneArr\nimport numpy as np\n\n\ndef get_density_kernel(cosmo, dndz):\n \"\"\"This convenience function returns the radial kernel for\n galaxy-clustering-like tracers. Given an unnormalized\n redshift distribution, it returns two arrays: chi, w(chi),\n where chi is an array of radial distances in units of\n Mpc and w(chi) = p(z) * H(z), where H(z) is the expansion\n rate in units of Mpc^-1 and p(z) is the normalized\n redshift distribution.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): cosmology object used to\n transform redshifts into distances.\n dndz (tulple of arrays): A tuple of arrays (z, N(z))\n giving the redshift distribution of the objects.\n The units are arbitrary; N(z) will be normalized\n to unity.\n \"\"\"\n z_n, n = _check_array_params(dndz, 'dndz')\n # this call inits the distance splines neded by the kernel functions\n chi = comoving_radial_distance(cosmo, 1./(1.+z_n))\n status = 0\n wchi, status = lib.get_number_counts_kernel_wrapper(cosmo.cosmo,\n z_n, n,\n len(z_n),\n status)\n check(status)\n return chi, wchi\n\n\ndef get_lensing_kernel(cosmo, dndz, mag_bias=None):\n \"\"\"This convenience function returns the radial kernel for\n weak-lensing-like. Given an unnormalized redshift distribution\n and an optional magnification bias function, it returns\n two arrays: chi, w(chi), where chi is an array of radial\n distances in units of Mpc and w(chi) is the lensing shear\n kernel (or the magnification one if `mag_bias` is not `None`).\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): cosmology object used to\n transform redshifts into distances.\n dndz (tulple of arrays): A tuple of arrays (z, N(z))\n giving the redshift distribution of the objects.\n The units are arbitrary; N(z) will be normalized\n to unity.\n mag_bias (tuple of arrays, optional): A tuple of arrays (z, s(z))\n giving the magnification bias as a function of redshift. If\n `None`, s=0 will be assumed\n \"\"\"\n # we need the distance functions at the C layer\n cosmo.compute_distances()\n\n z_n, n = _check_array_params(dndz, 'dndz')\n has_magbias = mag_bias is not None\n z_s, s = _check_array_params(mag_bias, 'mag_bias')\n\n # Calculate number of samples in chi\n nchi = lib.get_nchi_lensing_kernel_wrapper(z_n)\n # Compute array of chis\n status = 0\n chi, status = lib.get_chis_lensing_kernel_wrapper(cosmo.cosmo, z_n[-1],\n nchi, status)\n # Compute kernel\n wchi, status = lib.get_lensing_kernel_wrapper(cosmo.cosmo,\n z_n, n, z_n[-1],\n int(has_magbias), z_s, s,\n chi, nchi, status)\n check(status)\n return chi, wchi\n\n\ndef get_kappa_kernel(cosmo, z_source, nsamples):\n \"\"\"This convenience function returns the radial kernel for\n CMB-lensing-like tracers.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object.\n z_source (float): Redshift of source plane for CMB lensing.\n nsamples (int): number of samples over which the kernel\n is desired. 
These will be equi-spaced in radial distance.\n The kernel is quite smooth, so usually O(100) samples\n is enough.\n \"\"\"\n # this call inits the distance splines needed by the kernel functions\n chi_source = comoving_radial_distance(cosmo, 1./(1.+z_source))\n chi = np.linspace(0, chi_source, nsamples)\n\n status = 0\n wchi, status = lib.get_kappa_kernel_wrapper(cosmo.cosmo, chi_source,\n chi, nsamples, status)\n check(status)\n return chi, wchi\n\n\nclass Tracer(object):\n \"\"\"Tracers contain the information necessary to describe the\n contribution of a given sky observable to its cross-power spectrum\n with any other tracer. Tracers are composed of 4 main ingredients:\n\n * A radial kernel: this expresses the support in redshift/distance\n over which this tracer extends.\n\n * A transfer function: this is a function of wavenumber and\n scale factor that describes the connection between the tracer\n and the power spectrum on different scales and at different\n cosmic times.\n\n * An ell-dependent prefactor: normally associated with angular\n derivatives of a given fundamental quantity.\n\n * The order of the derivative of the Bessel functions with which\n they enter the computation of the angular power spectrum.\n\n A `Tracer` object will in reality be a list of different such\n tracers that get combined linearly when computing power spectra.\n Further details can be found in Section 4.9 of the CCL note.\n \"\"\"\n def __init__(self):\n \"\"\"By default this `Tracer` object will contain no actual\n tracers\n \"\"\"\n # Do nothing, just initialize list of tracers\n self._trc = []\n\n def _dndz(self, z):\n raise NotImplementedError(\"`get_dndz` not implemented for \"\n \"this `Tracer` type.\")\n\n def get_dndz(self, z):\n \"\"\"Get the redshift distribution for this tracer.\n Only available for some tracers (:class:`NumberCountsTracer` and\n :class:`WeakLensingTracer`).\n\n Args:\n z (float or array_like): redshift values.\n\n Returns:\n array_like: redshift distribution evaluated at the \\\n input values of `z`.\n \"\"\"\n return self._dndz(z)\n\n def get_kernel(self, chi):\n \"\"\"Get the radial kernels for all tracers contained\n in this `Tracer`.\n\n Args:\n chi (float or array_like): values of the comoving\n radial distance in increasing order and in Mpc.\n\n Returns:\n array_like: list of radial kernels for each tracer. \\\n The shape will be `(n_tracer, chi.size)`, where \\\n `n_tracer` is the number of tracers. The last \\\n dimension will be squeezed if the input is a \\\n scalar.\n \"\"\"\n if not hasattr(self, '_trc'):\n return []\n\n chi_use = np.atleast_1d(chi)\n kernels = []\n for t in self._trc:\n status = 0\n w, status = lib.cl_tracer_get_kernel(t, chi_use,\n chi_use.size,\n status)\n check(status)\n kernels.append(w)\n kernels = np.array(kernels)\n if np.ndim(chi) == 0:\n if kernels.shape != (0,):\n kernels = np.squeeze(kernels, axis=-1)\n return kernels\n\n def get_f_ell(self, ell):\n \"\"\"Get the ell-dependent prefactors for all tracers\n contained in this `Tracer`.\n\n Args:\n ell (float or array_like): angular multipole values.\n\n Returns:\n array_like: list of prefactors for each tracer. \\\n The shape will be `(n_tracer, ell.size)`, where \\\n `n_tracer` is the number of tracers. 
The last \\\n dimension will be squeezed if the input is a \\\n scalar.\n \"\"\"\n if not hasattr(self, '_trc'):\n return []\n\n ell_use = np.atleast_1d(ell)\n f_ells = []\n for t in self._trc:\n status = 0\n f, status = lib.cl_tracer_get_f_ell(t, ell_use,\n ell_use.size,\n status)\n check(status)\n f_ells.append(f)\n f_ells = np.array(f_ells)\n if np.ndim(ell) == 0:\n if f_ells.shape != (0,):\n f_ells = np.squeeze(f_ells, axis=-1)\n return f_ells\n\n def get_transfer(self, lk, a):\n \"\"\"Get the transfer functions for all tracers contained\n in this `Tracer`.\n\n Args:\n lk (float or array_like): values of the natural logarithm of\n the wave number (in units of inverse Mpc) in increasing\n order.\n a (float or array_like): values of the scale factor.\n\n Returns:\n array_like: list of transfer functions for each tracer. \\\n The shape will be `(n_tracer, lk.size, a.size)`, where \\\n `n_tracer` is the number of tracers. The other \\\n dimensions will be squeezed if the inputs are scalars.\n \"\"\"\n if not hasattr(self, '_trc'):\n return []\n\n lk_use = np.atleast_1d(lk)\n a_use = np.atleast_1d(a)\n transfers = []\n for t in self._trc:\n status = 0\n t, status = lib.cl_tracer_get_transfer(t, lk_use, a_use,\n lk_use.size * a_use.size,\n status)\n check(status)\n transfers.append(t.reshape([lk_use.size, a_use.size]))\n transfers = np.array(transfers)\n if transfers.shape != (0,):\n if np.ndim(a) == 0:\n transfers = np.squeeze(transfers, axis=-1)\n if np.ndim(lk) == 0:\n transfers = np.squeeze(transfers, axis=-1)\n else:\n if np.ndim(lk) == 0:\n transfers = np.squeeze(transfers, axis=-2)\n return transfers\n\n def get_bessel_derivative(self):\n \"\"\"Get Bessel function derivative orders for all tracers contained\n in this `Tracer`.\n\n Returns:\n array_like: list of Bessel derivative orders for each tracer.\n \"\"\"\n if not hasattr(self, '_trc'):\n return []\n\n return np.array([t.der_bessel for t in self._trc])\n\n def add_tracer(self, cosmo, kernel=None,\n transfer_ka=None, transfer_k=None, transfer_a=None,\n der_bessel=0, der_angles=0,\n is_logt=False, extrap_order_lok=0, extrap_order_hik=2):\n \"\"\"Adds one more tracer to the list contained in this `Tracer`.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): cosmology object.\n kernel (tuple of arrays, optional): A tuple of arrays\n (`chi`, `w_chi`) describing the radial kernel of this\n tracer. `chi` should contain values of the comoving\n radial distance in increasing order, and `w_chi` should\n contain the values of the kernel at those values of the\n radial distance. The kernel will be assumed to be zero\n outside the range of distances covered by `chi`. If\n `kernel` is `None` a constant kernel w(chi)=1 will be\n assumed everywhere.\n transfer_ka (tuple of arrays, optional): a tuple of arrays\n (`a`,`lk`,`t_ka`) describing the most general transfer\n function for a tracer. `a` should be an array of scale\n factor values in increasing order. `lk` should be an\n array of values of the natural logarithm of the wave\n number (in units of inverse Mpc) in increasing order.\n `t_ka` should be an array of shape `(na,nk)`, where\n `na` and `nk` are the sizes of `a` and `lk` respectively.\n `t_ka` should hold the values of the transfer function at\n the corresponding values of `a` and `lk`. If your transfer\n function is factorizable (i.e. T(a,k) = A(a) * K(k)), it is\n more efficient to set this to `None` and use `transfer_k`\n and `transfer_a` to describe K and A respectively. 
The\n transfer function will be assumed continuous and constant\n outside the range of scale factors covered by `a`. It will\n be extrapolated using polynomials of order `extrap_order_lok`\n and `extrap_order_hik` below and above the range of\n wavenumbers covered by `lk` respectively. If this argument\n is not `None`, the values of `transfer_k` and `transfer_a`\n will be ignored.\n transfer_k (tuple of arrays, optional): a tuple of arrays\n (`lk`,`t_k`) describing the scale-dependent part of a\n factorizable transfer function. `lk` should be an\n array of values of the natural logarithm of the wave\n number (in units of inverse Mpc) in increasing order.\n `t_k ` should be an array of the same size holding the\n values of the k-dependent part of the transfer function\n at those wavenumbers. It will be extrapolated using\n polynomials of order `extrap_order_lok` and `extrap_order_hik`\n below and above the range of wavenumbers covered by `lk`\n respectively. If `None`, the k-dependent part of the transfer\n function will be set to 1 everywhere.\n transfer_a (tuple of arrays, optional): a tuple of arrays\n (`a`,`t_a`) describing the time-dependent part of a\n factorizable transfer function. `a` should be an array of\n scale factor values in increasing order. `t_a` should\n contain the time-dependent part of the transfer function\n at those values of the scale factor. The time dependence\n will be assumed continuous and constant outside the range\n covered by `a`. If `None`, the time-dependent part of the\n transfer function will be set to 1 everywhere.\n der_bessel (int): order of the derivative of the Bessel\n functions with which this tracer enters the calculation\n of the power spectrum. Allowed values are -1, 0, 1 and 2.\n 0, 1 and 2 correspond to the raw functions, their first\n derivatives or their second derivatives. -1 corresponds to\n the raw functions divided by the square of their argument.\n We enable this special value because this type of dependence\n is ubiquitous for many common tracers (lensing, IAs), and\n makes the corresponding transfer functions more stables\n for small k or chi.\n der_angles (int): integer describing the ell-dependent prefactor\n associated with this tracer. Allowed values are 0, 1 and 2.\n 0 means no prefactor. 1 means a prefactor ell*(ell+1),\n associated with the angular laplacian and used e.g. for\n lensing convergence and magnification. 2 means a prefactor\n sqrt((ell+2)!/(ell-2)!), associated with the angular\n derivatives of spin-2 fields (e.g. cosmic shear, IAs).\n is_logt (bool): if `True`, `transfer_ka`, `transfer_k` and\n `transfer_a` will contain the natural logarithm of the\n transfer function (or their factorizable parts). 
Default is\n `False`.\n extrap_order_lok (int): polynomial order used to extrapolate the\n transfer functions for low wavenumbers not covered by the\n input arrays.\n extrap_order_hik (int): polynomial order used to extrapolate the\n transfer functions for high wavenumbers not covered by the\n input arrays.\n \"\"\"\n is_factorizable = transfer_ka is None\n is_k_constant = (transfer_ka is None) and (transfer_k is None)\n is_a_constant = (transfer_ka is None) and (transfer_a is None)\n is_kernel_constant = kernel is None\n\n chi_s, wchi_s = _check_array_params(kernel, 'kernel')\n if is_factorizable:\n a_s, ta_s = _check_array_params(transfer_a, 'transfer_a')\n lk_s, tk_s = _check_array_params(transfer_k, 'transfer_k')\n tka_s = NoneArr\n if (not is_a_constant) and (a_s.shape != ta_s.shape):\n raise ValueError(\"Time-dependent transfer arrays \"\n \"should have the same shape\")\n if (not is_k_constant) and (lk_s.shape != tk_s.shape):\n raise ValueError(\"Scale-dependent transfer arrays \"\n \"should have the same shape\")\n else:\n a_s, lk_s, tka_s = _check_array_params(transfer_ka, 'transer_ka',\n arr3=True)\n if tka_s.shape != (len(a_s), len(lk_s)):\n raise ValueError(\"2D transfer array has inconsistent \"\n \"shape. Should be (na,nk)\")\n tka_s = tka_s.flatten()\n ta_s = NoneArr\n tk_s = NoneArr\n\n status = 0\n ret = lib.cl_tracer_t_new_wrapper(cosmo.cosmo,\n int(der_bessel),\n int(der_angles),\n chi_s, wchi_s,\n a_s, lk_s,\n tka_s, tk_s, ta_s,\n int(is_logt),\n int(is_factorizable),\n int(is_k_constant),\n int(is_a_constant),\n int(is_kernel_constant),\n int(extrap_order_lok),\n int(extrap_order_hik),\n status)\n self._trc.append(_check_returned_tracer(ret))\n\n def __del__(self):\n # Sometimes lib is freed before some Tracers, in which case, this\n # doesn't work.\n # So just check that lib.cl_tracer_t_free is still a real function.\n if hasattr(self, '_trc') and lib.cl_tracer_t_free is not None:\n for t in self._trc:\n lib.cl_tracer_t_free(t)\n\n\nclass NumberCountsTracer(Tracer):\n \"\"\"Specific `Tracer` associated to galaxy clustering with linear\n scale-independent bias, including redshift-space distortions and\n magnification.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object.\n has_rsd (bool): Flag for whether the tracer has a\n redshift-space distortion term.\n dndz (tuple of arrays): A tuple of arrays (z, N(z))\n giving the redshift distribution of the objects. The units are\n arbitrary; N(z) will be normalized to unity.\n bias (tuple of arrays): A tuple of arrays (z, b(z))\n giving the galaxy bias. If `None`, this tracer won't include\n a term proportional to the matter density contrast.\n mag_bias (tuple of arrays, optional): A tuple of arrays (z, s(z))\n giving the magnification bias as a function of redshift. If\n `None`, the tracer is assumed to not have magnification bias\n terms. 
Defaults to None.\n \"\"\"\n def __init__(self, cosmo, has_rsd, dndz, bias, mag_bias=None):\n self._trc = []\n\n # we need the distance functions at the C layer\n cosmo.compute_distances()\n\n from scipy.interpolate import interp1d\n z_n, n = _check_array_params(dndz, 'dndz')\n self._dndz = interp1d(z_n, n, bounds_error=False,\n fill_value=0)\n\n kernel_d = None\n if bias is not None: # Has density term\n # Kernel\n if kernel_d is None:\n kernel_d = get_density_kernel(cosmo, dndz)\n # Transfer\n z_b, b = _check_array_params(bias, 'bias')\n # Reverse order for increasing a\n t_a = (1./(1+z_b[::-1]), b[::-1])\n self.add_tracer(cosmo, kernel=kernel_d, transfer_a=t_a)\n if has_rsd: # Has RSDs\n # Kernel\n if kernel_d is None:\n kernel_d = get_density_kernel(cosmo, dndz)\n # Transfer (growth rate)\n z_b, _ = _check_array_params(dndz, 'dndz')\n a_s = 1./(1+z_b[::-1])\n t_a = (a_s, -growth_rate(cosmo, a_s))\n self.add_tracer(cosmo, kernel=kernel_d,\n transfer_a=t_a, der_bessel=2)\n if mag_bias is not None: # Has magnification bias\n # Kernel\n chi, w = get_lensing_kernel(cosmo, dndz, mag_bias=mag_bias)\n # Multiply by -2 for magnification\n kernel_m = (chi, -2 * w)\n self.add_tracer(cosmo, kernel=kernel_m,\n der_bessel=-1, der_angles=1)\n\n\nclass WeakLensingTracer(Tracer):\n \"\"\"Specific `Tracer` associated to galaxy shape distortions including\n lensing shear and intrinsic alignments within the L-NLA model.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object.\n dndz (tuple of arrays): A tuple of arrays (z, N(z))\n giving the redshift distribution of the objects. The units are\n arbitrary; N(z) will be normalized to unity.\n has_shear (bool): set to `False` if you want to omit the lensing shear\n contribution from this tracer.\n ia_bias (tuple of arrays, optional): A tuple of arrays\n (z, A_IA(z)) giving the intrinsic alignment amplitude A_IA(z).\n If `None`, the tracer is assumped to not have intrinsic\n alignments. Defaults to None.\n use_A_ia (bool): set to True to use the conventional IA\n normalization. Set to False to use the raw input amplitude,\n which will usually be 1 for use with PT IA modeling.\n Defaults to True.\n \"\"\"\n def __init__(self, cosmo, dndz, has_shear=True, ia_bias=None,\n use_A_ia=True):\n self._trc = []\n\n # we need the distance functions at the C layer\n cosmo.compute_distances()\n\n from scipy.interpolate import interp1d\n z_n, n = _check_array_params(dndz, 'dndz')\n self._dndz = interp1d(z_n, n, bounds_error=False,\n fill_value=0)\n\n if has_shear:\n # Kernel\n kernel_l = get_lensing_kernel(cosmo, dndz)\n self.add_tracer(cosmo, kernel=kernel_l,\n der_bessel=-1, der_angles=2)\n if ia_bias is not None: # Has intrinsic alignments\n z_a, tmp_a = _check_array_params(ia_bias, 'ia_bias')\n # Kernel\n kernel_i = get_density_kernel(cosmo, dndz)\n if use_A_ia:\n # Normalize so that A_IA=1\n D = growth_factor(cosmo, 1./(1+z_a))\n # Transfer\n # See Joachimi et al. (2011), arXiv: 1008.3491, Eq. 6.\n # and note that we use C_1= 5e-14 from arXiv:0705.0166\n rho_m = lib.cvar.constants.RHO_CRITICAL * cosmo['Omega_m']\n a = - tmp_a * 5e-14 * rho_m / D\n else:\n # use the raw input normalization. 
Normally, this will be 1\n # to allow nonlinear PT IA models, where normalization is\n # already applied to the power spectrum.\n a = tmp_a\n # Reverse order for increasing a\n t_a = (1./(1+z_a[::-1]), a[::-1])\n self.add_tracer(cosmo, kernel=kernel_i, transfer_a=t_a,\n der_bessel=-1, der_angles=2)\n\n\nclass CMBLensingTracer(Tracer):\n \"\"\"A Tracer for CMB lensing.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object.\n z_source (float): Redshift of source plane for CMB lensing.\n nsamples (int, optional): number of samples over which the kernel\n is desired. These will be equi-spaced in radial distance.\n The kernel is quite smooth, so usually O(100) samples\n is enough.\n \"\"\"\n def __init__(self, cosmo, z_source, n_samples=100):\n self._trc = []\n\n # we need the distance functions at the C layer\n cosmo.compute_distances()\n\n kernel = get_kappa_kernel(cosmo, z_source, n_samples)\n self.add_tracer(cosmo, kernel=kernel, der_bessel=-1, der_angles=1)\n\n\nclass tSZTracer(Tracer):\n \"\"\"Specific :class:`Tracer` associated with the thermal Sunyaev Zel'dovich\n Compton-y parameter. The radial kernel for this tracer is simply given by\n\n .. math::\n W(\\\\chi) = \\\\frac{\\\\sigma_T}{m_ec^2} \\\\frac{1}{1+z},\n\n where :math:`\\\\sigma_T` is the Thomson scattering cross section and\n :math:`m_e` is the electron mass.\n\n Any angular power spectra computed with this tracer, should use\n a three-dimensional power spectrum involving the electron pressure\n in physical (non-comoving) units of :math:`eV\\\\,{\\\\rm cm}^{-3}`.\n\n Args:\n cosmo (:class:`~pyccl.core.Cosmology`): Cosmology object.\n zmax (float): maximum redshift up to which we define the\n kernel.\n n_chi (float): number of intervals in the radial comoving\n distance on which we sample the kernel.\n \"\"\"\n def __init__(self, cosmo, z_max=6., n_chi=1024):\n self.chi_max = comoving_radial_distance(cosmo, 1./(1+z_max))\n chi_arr = np.linspace(0, self.chi_max, n_chi)\n a_arr = scale_factor_of_chi(cosmo, chi_arr)\n # This is \\sigma_T / (m_e * c^2)\n prefac = 4.01710079e-06\n w_arr = prefac * a_arr\n\n self._trc = []\n self.add_tracer(cosmo, kernel=(chi_arr, w_arr))\n\n\ndef _check_returned_tracer(return_val):\n \"\"\"Wrapper to catch exceptions when tracers are spawned from C.\n \"\"\"\n if (isinstance(return_val, int)):\n check(return_val)\n tr = None\n else:\n tr, _ = return_val\n return tr\n" ]
[ [ "scipy.interpolate.interp1d", "numpy.squeeze", "numpy.atleast_1d", "numpy.ndim", "numpy.array", "numpy.linspace" ] ]
wilile26811249/ConvMixer
[ "e3a2bcbedb8e8c2b7b5f942ed0ae55473940d47c" ]
[ "utils.py" ]
[ "from typing import List, Optional\n\nimport numpy as np\nimport torch\nfrom torchvision import datasets\nfrom torchvision import transforms as T\n\nclass AverageMeter(object):\n def __init__(self,\n name: str,\n fmt: Optional[str] = ':f',\n ) -> None:\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self,\n val: float,\n n: Optional[int] = 1\n ) -> None:\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name}:{val' + self.fmt + '}({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self,\n num_batches: int,\n meters: List[AverageMeter],\n prefix: Optional[str] = \"\",\n batch_info: Optional[str] = \"\"\n ) -> None:\n self.batch_fmster = self._get_batch_fmster(num_batches)\n self.meters = meters\n self.prefix = prefix\n self.batch_info = batch_info\n\n def display(self, batch):\n self.info = [self.prefix + self.batch_info + self.batch_fmster.format(batch)]\n self.info += [str(meter) for meter in self.meters]\n print('\\t'.join(self.info))\n\n def _get_batch_fmster(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\nclass EarlyStopping(object):\n \"\"\"\n Arg\n \"\"\"\n def __init__(self,\n patience: int = 7,\n verbose: Optional[bool] = False,\n delta: Optional[float] = 0.0,\n path: Optional[str] = \"checkpoint.pt\"\n ) -> None:\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop_flag = False\n self.val_loss_min = np.Inf\n self.delta = delta\n self.verbose = verbose\n self.path = path\n\n def __call__(self, val_loss, model):\n score = abs(val_loss)\n if self.best_score is None:\n self.best_score = score\n self.save_model(val_loss, model)\n elif val_loss > self.val_loss_min + self.delta:\n self.counter += 1\n if self.verbose:\n print(f\"EarlyStopping Counter: {self.counter} out of {self.patience}\")\n print(f\"Best val loss: {self.val_loss_min} Current val loss: {score}\")\n if self.counter >= self.patience:\n self.early_stop_flag = True\n else:\n self.best_score = score\n self.save_model(val_loss, model)\n self.counter = 0\n\n def save_model(self, val_loss, model):\n if self.verbose:\n print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...')\n torch.save(model.state_dict(), self.path)\n self.val_loss_min = val_loss\n\n\ndef accuracy(output, target, topk = (1,)):\n \"\"\"\n Computes the accuracy over the top k predictions\n \"\"\"\n with torch.no_grad():\n max_k = max(topk)\n batch_size = output.size(0)\n\n _, pred = output.topk(max_k,\n dim = 1,\n largest = True,\n sorted = True\n )\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n result = []\n for k in topk:\n correct_k = correct[: k].contiguous().view(-1).float().sum(0, keepdim = True)\n result.append(correct_k.mul_(100.0 / batch_size))\n return result\n\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\n\ndef get_cifar10_dataset(train_transform = None, test_transform = None):\n train_dataset = datasets.CIFAR10(\n root = './data',\n train = True,\n transform = train_transform,\n download = True\n )\n test_dataset = datasets.CIFAR10(\n root = './data',\n train = False,\n transform = test_transform,\n download = True\n )\n return train_dataset, test_dataset\n\n\ndef get_dataloader(\n train_transform,\n test_transform,\n img_size = 224,\n split = (0.8, 0.2),\n **kwargs\n ):\n assert len(split) == 2\n assert sum(split) == 1\n assert split[0] + split[1] == 1\n\n train_dataset, test_dataset = get_cifar10_dataset(train_transform, test_transform)\n train_size = int(len(train_dataset) * split[0])\n test_size = int(len(train_dataset) * split[1])\n train_dataset, val_dataset = torch.utils.data.random_split(\n train_dataset,\n (train_size, test_size)\n )\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size = kwargs['batch_size'],\n shuffle = True,\n num_workers = kwargs['num_workers'],\n pin_memory = True,\n drop_last = True,\n sampler = None\n )\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size = kwargs['batch_size'],\n shuffle = False,\n num_workers = kwargs['num_workers'],\n pin_memory = True,\n drop_last = False,\n sampler = None\n )\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size = kwargs['batch_size'],\n shuffle = False,\n num_workers = kwargs['num_workers'],\n pin_memory = True,\n drop_last = False,\n sampler = None\n )\n return train_loader, val_loader, test_loader" ]
[ [ "torch.utils.data.DataLoader", "torch.no_grad", "torch.utils.data.random_split" ] ]
josesho/bootstrap-contrast
[ "94fa42a5dc4622be016e2e522d1f07b19ba23a8d" ]
[ "setup.py" ]
[ "from setuptools import setup, find_packages\nimport os\n# Taken from setup.py in seaborn.\n# temporarily redirect config directory to prevent matplotlib importing\n# testing that for writeable directory which results in sandbox error in\n# certain easy_install versions\nos.environ[\"MPLCONFIGDIR\"]=\".\"\n\n# Modified from from setup.py in seaborn.\ntry:\n from setuptools import setup\n _has_setuptools=True\nexcept ImportError:\n from distutils.core import setup\n\ndef check_dependencies():\n to_install=[]\n\n try:\n import numpy\n except ImportError:\n to_install.append('numpy>=1.13.1')\n try:\n import scipy\n except ImportError:\n to_install.append('scipy>=0.19.1')\n try:\n import matplotlib\n except ImportError:\n to_install.append('matplotlib>=2.0.2')\n try:\n import pandas\n if int(pandas.__version__.split('.')[1])<20:\n to_install.append('pandas>=0.20.1')\n except ImportError:\n to_install.append('pandas>=0.20.1')\n try:\n import seaborn\n except ImportError:\n to_install.append('seaborn>0.8')\n\n return to_install\n\nif __name__==\"__main__\":\n\n installs=check_dependencies()\n setup(name='bootstrap_contrast',\n author='Joses Ho',\n author_email='[email protected]',\n version=1.0,\n description='Calculation and Visualization of Confidence Intervals and Effect Sizes for Python.',\n packages=find_packages(),\n install_requires=installs,\n url='http://github.com/josesho/bootstrap_contrast',\n license='MIT'\n )\n" ]
[ [ "pandas.__version__.split" ] ]
exenGT/pymatgen
[ "a8ffb820ab8fc3f60251099e38c8888f45eae618" ]
[ "pymatgen/analysis/piezo.py" ]
[ "# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\n\"\"\"\nThis module provides classes for the Piezoelectric tensor\n\"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom pymatgen.core.tensors import Tensor\n\n__author__ = \"Shyam Dwaraknath\"\n__copyright__ = \"Copyright 2016, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyam Dwaraknath\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"Feb, 2016\"\n\n\nclass PiezoTensor(Tensor):\n \"\"\"\n This class describes the 3x6 piezo tensor in Voigt-notation\n \"\"\"\n\n def __new__(cls, input_array, tol=1e-3):\n \"\"\"\n Create an PiezoTensor object. The constructor throws an error if\n the shape of the input_matrix argument is not 3x3x3, i. e. in true\n tensor notation. Note that the constructor uses __new__ rather than\n __init__ according to the standard method of subclassing numpy\n ndarrays.\n\n Args:\n input_matrix (3x3x3 array-like): the 3x6 array-like\n representing the piezo tensor\n \"\"\"\n obj = super().__new__(cls, input_array, check_rank=3)\n if not (obj - np.transpose(obj, (0, 2, 1)) < tol).all():\n warnings.warn(\"Input piezo tensor does not satisfy standard symmetries\")\n return obj.view(cls)\n\n @classmethod\n def from_vasp_voigt(cls, input_vasp_array):\n \"\"\"\n Args:\n input_vasp_array (nd.array): Voigt form of tensor.\n\n Returns:\n PiezoTensor\n \"\"\"\n voigt_map = [(0, 0), (1, 1), (2, 2), (0, 1), (1, 2), (0, 2)]\n input_vasp_array = np.array(input_vasp_array)\n rank = 3\n\n pt = np.zeros([rank, 3, 3])\n for dim in range(rank):\n for pos, val in enumerate(voigt_map):\n pt[dim][voigt_map[pos]] = input_vasp_array[dim][pos]\n pt[dim].T[voigt_map[pos]] = input_vasp_array[dim][pos]\n\n return cls(pt)\n" ]
[ [ "numpy.array", "numpy.transpose", "numpy.zeros" ] ]
j-lazo/lumen_segmentation
[ "442b6e642b4743e6b7bf56ab77e11e8e95062ed7" ]
[ "general/calculate_performance_dataset.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 9 16:18:55 2020\n\n@author: jlazo\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 26 01:18:22 2020\n\n@author: jlazo\n\"\"\"\n\nimport os\nimport numpy as np\nimport cv2\nfrom glob import glob\nfrom sklearn.model_selection import train_test_split\nfrom os import listdir\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import accuracy_score\n\nimport os.path\nfrom os import path\nfrom PIL import Image\nfrom os import listdir\nfrom os.path import isfile, join\nfrom datetime import datetime\nimport csv\n\nproject_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/' \\\n 'data/lumen_data/'\ngeneral_model = 'ResUNet'\nmodel_to_test = 'ResUnet_lr_1e-05_bs_16_rgb_27_04_2021_20_10'\nfolder_to_test = 'phantom_001_pt1'\n\n\ndef calculate_area_and_circunference(dir_folder):\n mask_list = sorted(os.listdir(dir_folder))\n\n list_areas = []\n list_circuference = []\n list_circularities = []\n\n size_x = []\n size_y = []\n\n for mask in mask_list[:]:\n name_mask = ''.join([dir_folder, mask])\n\n arc_len, area = findArc(name_mask)\n if area != 0:\n circulatiry = 1.0*(arc_len**2)/(4*np.pi*area)\n list_circularities.append(circulatiry)\n\n\n list_areas.append(area)\n list_circuference.append(arc_len)\n\n #size_x.append(np.amax(list_x_pixels) - np.amin(list_x_pixels))\n #size_y.append(np.amax(list_y_pixels) - np.amin(list_y_pixels))\n\n return list_areas, list_circuference, list_circularities\n\n\ndef calculateDistance(x1, y1, X, Y):\n\n dist_vector = []\n for index, x2, in enumerate(X):\n y2 = Y[index]\n dist = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n dist_vector.append(dist)\n\n return dist_vector\n\n\ndef findArc(image, th=200):\n img = cv2.imread(image)\n res = img.copy()\n ## convert to gray\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ## threshold the gray\n th, threshed = cv2.threshold(gray, th, 255, cv2.THRESH_BINARY)\n ## Find contours on the binary threshed image\n cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n\n ## calcualte\n for cnt in cnts:\n arclen = cv2.arcLength(cnt, True)\n area = cv2.contourArea(cnt)\n cv2.drawContours(res, [cnt], -1, (0,255,0), 3, cv2.LINE_AA)\n #print(\"Length: {:.3f}\\nArea: {:.3f}\".format(arclen, area))\n\n cnt = cnts[0]\n pnts_x = [point[0][0] for point in cnt]\n pnts_y = [point[0][1] for point in cnt]\n\n moments = cv2.moments(cnt)\n cx = int(moments['m10'] / moments['m00'])\n cy = int(moments['m01'] / moments['m00'])\n\n distances = calculateDistance(cx, cy, pnts_x, pnts_y)\n fig, ax = plt.subplots()\n ax.plot(cx, cy, 'ro')\n ax.add_artist(plt.Circle((cx, cy), np.min(distances), color='g', fill=False))\n ax.add_artist(plt.Circle((cx, cy), np.max(distances), color='b', fill=False))\n\n return arclen, area\n\n\ndef read_results_csv(file_path, row_id=0):\n dice_values = []\n with open(file_path, 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n dice_values.append(float(row[row_id]))\n \n return dice_values\n\n\ndef read_results_csv_str(file_path, row_id=0):\n dice_values = []\n with open(file_path, 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n dice_values.append(row[row_id])\n \n return dice_values\n\n\ndef calculate_rates(image_1, image_2):\n\n image_1 = np.asarray(image_1).astype(np.bool)\n image_2 = np.asarray(image_2).astype(np.bool)\n\n image_1 = 
image_1.flatten()\n image_2 = image_2.flatten()\n\n if image_1.shape != image_2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n accuracy_value = accuracy_score(image_1, image_2)\n\n if (np.unique(image_1) == [False]).all() and (np.unique(image_2) == [False]).all():\n recall_value = 1.\n precision_value = 1.\n\n else:\n recall_value = recall_score(image_1, image_2)\n precision_value = average_precision_score(image_1, image_2)\n\n return precision_value, recall_value, accuracy_value\n \n\ndef dice(im1, im2, smooth=.001):\n\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n \n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n # Compute Dice coefficient\n intersection = np.logical_and(im1, im2)\n return 2. * (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)\n\n\ndef read_img(dir_image):\n original_img = cv2.imread(dir_image)\n height, width, depth = original_img.shape\n img = cv2.resize(original_img, (256, 256))\n img = img / 255\n img = (img > 0.9) * 1.0\n return img\n\n# ------ --- here the main evaluation begins -----\n# ---------- save the results of the validation dataset in a CSV file\n\n\nground_truth_imgs_dir = project_folder + 'test/' + folder_to_test + '/label/'\nresult_mask_dir = ''.join([project_folder, 'results/', general_model, '/',\n model_to_test, '/predictions/', folder_to_test,\n '/'])\n\nground_truth_image_list = sorted([file for file in listdir(ground_truth_imgs_dir) if isfile(join(ground_truth_imgs_dir, file))])\nresults_image_list = sorted([file for file in listdir(result_mask_dir) if isfile(join(result_mask_dir, file))])\n\nname_images = []\nresults_dice = []\nresults_precision = []\nresults_recall = []\nresults_accuracy = []\n\nfor image in ground_truth_image_list[:]:\n #print(image in results_image_list)\n #result_image = [name for name in results_image_list if image[-12:] == name[-12:]][0]\n\n if image in results_image_list:\n result_image = image\n print(result_image)\n original_mask = read_img(''.join([ground_truth_imgs_dir, image]))\n predicted_mask = read_img(''.join([result_mask_dir, result_image]))\n dice_val = dice(original_mask, predicted_mask)\n name_images.append(result_image)\n results_dice.append(dice_val)\n \n # calculate_rates returns (precision, recall, accuracy), in that order\n precision, recall, accuracy = calculate_rates(original_mask, predicted_mask)\n results_precision.append(precision)\n results_recall.append(recall)\n results_accuracy.append(accuracy)\n \n #print(precision, recall)\n else:\n print(image, 'not found in results list')\n\n\nground_truth_image_list = [file for file in listdir(ground_truth_imgs_dir) if isfile(join(ground_truth_imgs_dir, file))]\nresults_image_list = [file for file in listdir(result_mask_dir) if isfile(join(result_mask_dir, file))]\n\nnow = datetime.now()\nname_test_csv_file = ''.join([project_folder, 'results/', general_model,\n '/', model_to_test, '/',\n 'results_evaluation_',\n folder_to_test, '_',\n model_to_test,\n '_new.csv'])\nprint('saved in :', name_test_csv_file)\n\nwith open(name_test_csv_file, mode='w') as results_file:\n results_file_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for i, file in enumerate(name_images):\n results_file_writer.writerow([str(i), file, \n results_dice[i], \n results_precision[i], \n results_recall[i], \n results_accuracy[i]])\n #arclen, circumference])" ]
[ [ "numpy.logical_and", "numpy.asarray", "matplotlib.pyplot.subplots", "sklearn.metrics.accuracy_score", "numpy.max", "numpy.min", "numpy.sqrt", "sklearn.metrics.recall_score", "sklearn.metrics.average_precision_score", "numpy.unique" ] ]
mrawls/ELCtools
[ "7e15bf24c453ed4300c6f19f01cff74c041158b8" ]
[ "dnu_calculator.py" ]
[ "from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy import units as u\n'''\nMeredith Rawls, 2015\nTakes masses, radii, and chi2s from an ELC run and makes delta_nu distributions.\nAssumes a fixed temperature for each star.\n'''\ndir = '../../RG_ELCmodeling/9246715/demcmc001/'\n#dir = '../../RG_ELCmodeling/7037405/trial3/'\nstarparmfile = 'starparm.all'\nchifile = 'chi.all'\noutfile = 'deltanu_ELCcalc.txt'\nnplotbins = 100\n\nprint('Reading in giant files, be patient...')\nM1s, M2s, R1s, R2s = np.loadtxt(dir+starparmfile, usecols=(0,1,2,3), unpack=True)\nchi2s = np.genfromtxt(dir+chifile, usecols=(1,), unpack=True)\nprint('Finished reading in giant files!')\nT1 = 5000.\nT2 = 5000.\n\ndef calc_dnu(mass, radius, temp):\n density_sun = (1. * u.Msun) /(4./3. * np.pi * np.power((1. * u.Rsun),3))\n dnu_sun = 135.5 # muHz\n density = (mass * u.Msun) /(4./3. * np.pi * np.power((radius * u.Rsun),3))\n dnu = dnu_sun * np.sqrt(density/density_sun)\n return dnu\n\nprint('Calculating delta nu for each model, be patient...')\ndnu1s = []\ndnu2s = []\nfor M1, M2, R1, R2 in zip(M1s, M2s, R1s, R2s):\n dnu1s.append(calc_dnu(M1, R1, T1))\n dnu2s.append(calc_dnu(M2, R2, T2))\nprint('Finished calculating delta nus!')\n\nprint(np.median(dnu1s), np.std(dnu2s))\nprint(np.median(dnu2s), np.std(dnu2s))\ndelta_dnus = np.array(dnu1s) - np.array(dnu2s)\nprint(np.median(delta_dnus), np.std(delta_dnus))\n\nthreshold = 0.5 # muHz, typical mode width\ncount = 0 # tally for how many delta_dnus are within some threshold\nf1 = open(dir+outfile, 'w')\nfor chi2, dnu1, dnu2, delta_dnu in zip(chi2s, dnu1s, dnu2s, delta_dnus):\n print(chi2, dnu1, dnu2, delta_dnu, file=f1)\n if delta_dnu < threshold:\n count += 1\nf1.close()\n\nprint('Out of {0} models, {1} ({2}\\%) have a difference in dnu less than {3}.'.format \\\n (len(chi2s), count, float(count)/float(len(chi2s)), threshold))\n\n# Plot histograms of dnu1s, dnu2s, and the difference\nfig = plt.figure()\nax1 = fig.add_subplot(1, 3, 1)\nx1 = plt.xlabel('Delta nu 1')\nhistogram = plt.hist(dnu1s, nplotbins, histtype='stepfilled')\nax2 = fig.add_subplot(1, 3, 2)\nx2 = plt.xlabel('Delta nu 2')\nhistogram = plt.hist(dnu2s, nplotbins, histtype='stepfilled')\nax3 = fig.add_subplot(1, 3, 3)\nx3 = plt.xlabel('Delta nu 1 - Delta nu 2')\nhistogram = plt.hist(delta_dnus, nplotbins, histtype='stepfilled')\n\nplt.show()" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.figure", "numpy.median", "matplotlib.pyplot.show", "numpy.power", "matplotlib.pyplot.hist", "numpy.array", "numpy.std", "numpy.genfromtxt", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
canbakiskan/sparse_coding_frontend
[ "1f62b54824785aa441317ddab1baa3012f2fb401" ]
[ "src/utils/read_datasets.py" ]
[ "import torch\nfrom torchvision import datasets, transforms\n\nimport numpy as np\nfrom os.path import join\nfrom .namers import attack_file_namer\n\n\ndef tiny_imagenet(args):\n\n data_dir = join(args.directory, 'data')\n train_dir = join(data_dir, \"original_datasets\",\n \"tiny-imagenet-200\", \"train\")\n test_dir = join(data_dir, \"original_datasets\",\n \"tiny-imagenet-200\", \"val\")\n\n transform_train = transforms.Compose(\n [\n transforms.RandomCrop(64, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n\n transform_test = transforms.Compose([transforms.ToTensor()])\n\n trainset = datasets.ImageFolder(train_dir, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2\n )\n\n testset = datasets.ImageFolder(test_dir, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2\n )\n\n return train_loader, test_loader\n\n\ndef tiny_imagenet_from_file(args):\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n # Read\n if args.adv_testing.method == \"transfer\":\n filepath = join(\n args.directory, 'data',\n 'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file\n )\n\n else:\n filepath = attack_file_namer(args)\n\n test_images = np.load(filepath)\n\n data_dir = join(args.directory, 'data')\n test_dir = join(data_dir, \"original_datasets\",\n \"tiny-imagenet-200\", \"val\")\n transform_test = transforms.Compose([transforms.ToTensor()])\n testset = datasets.ImageFolder(test_dir, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2\n )\n\n tensor_x = torch.Tensor(test_images / np.max(test_images))\n tensor_y = torch.Tensor(test_loader.dataset.targets).long()\n\n tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n attack_loader = torch.utils.data.DataLoader(\n tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs\n )\n\n return attack_loader\n\n\ndef imagenette(args):\n\n data_dir = join(args.directory, 'data')\n train_dir = join(data_dir, \"original_datasets\",\n \"imagenette2-160\", \"train\")\n test_dir = join(data_dir, \"original_datasets\",\n \"imagenette2-160\", \"val\")\n\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 4, \"pin_memory\": True} if use_cuda else {}\n\n transform_train = transforms.Compose(\n [\n transforms.RandomCrop((160), padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n\n transform_test = transforms.Compose(\n [transforms.CenterCrop(160), transforms.ToTensor()]\n )\n\n trainset = datasets.ImageFolder(train_dir, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2\n )\n\n testset = datasets.ImageFolder(test_dir, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2\n )\n\n return train_loader, test_loader\n\n\ndef imagenette_from_file(args):\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n # Read\n if args.adv_testing.method == \"transfer\":\n 
filepath = join(\n args.directory, 'data', 'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file\n )\n\n else:\n filepath = attack_file_namer(args)\n\n test_images = np.load(filepath)\n\n data_dir = join(args.directory, 'data')\n test_dir = join(data_dir, \"original_datasets\",\n \"imagenette2-160\", \"val\")\n transform_test = transforms.Compose([transforms.ToTensor()])\n testset = datasets.ImageFolder(test_dir, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2\n )\n\n tensor_x = torch.Tensor(test_images / np.max(test_images))\n tensor_y = torch.Tensor(test_loader.dataset.targets).long()\n\n tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n attack_loader = torch.utils.data.DataLoader(\n tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs\n )\n\n return attack_loader\n\n\ndef cifar10(args):\n\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 4, \"pin_memory\": True} if use_cuda else {}\n\n transform_train = transforms.Compose(\n [\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n\n transform_test = transforms.Compose([transforms.ToTensor()])\n\n trainset = datasets.CIFAR10(\n root=join(args.directory, 'data', 'original_datasets'),\n train=True,\n download=True,\n transform=transform_train,\n )\n train_loader = torch.utils.data.DataLoader(\n trainset, batch_size=args.neural_net.train_batch_size, shuffle=True, num_workers=2\n )\n\n testset = datasets.CIFAR10(\n root=join(args.directory, 'data', 'original_datasets'),\n train=False,\n download=True,\n transform=transform_test,\n )\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.neural_net.test_batch_size, shuffle=False, num_workers=2\n )\n\n return train_loader, test_loader\n\n\ndef cifar10_from_file(args):\n\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n # Read\n if args.adv_testing.method == \"transfer\":\n filepath = join(\n args.directory, 'data', 'attacked_datasets', args.dataset.name, args.adv_testing.transfer_file\n )\n\n else:\n filepath = attack_file_namer(args)\n\n test_images = np.load(filepath)\n\n cifar10 = datasets.CIFAR10(\n join(args.directory, 'data', 'original_datasets'),\n train=False,\n transform=None,\n target_transform=None,\n download=False,\n )\n\n tensor_x = torch.Tensor(test_images / np.max(test_images))\n tensor_y = torch.Tensor(cifar10.targets).long()\n\n tensor_data = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n attack_loader = torch.utils.data.DataLoader(\n tensor_data, batch_size=args.neural_net.test_batch_size, shuffle=False, **kwargs\n )\n\n return attack_loader\n\n\ndef imagenet(args):\n\n data_dir = join(args.directory, 'data')\n train_dir = join(data_dir, \"original_datasets\", \"imagenet\", \"train\")\n test_dir = join(data_dir, \"original_datasets\", \"imagenet\", \"val\")\n\n use_cuda = args.use_gpu and torch.cuda.is_available()\n kwargs = {\"num_workers\": 4, \"pin_memory\": True} if use_cuda else {}\n\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n\n transform_train = transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]\n )\n\n transform_test = transforms.Compose(\n [\n 
transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ]\n )\n\n trainset = datasets.ImageFolder(train_dir, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(\n trainset,\n batch_size=args.neural_net.train_batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=True,\n )\n\n testset = datasets.ImageFolder(test_dir, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(\n testset,\n batch_size=args.neural_net.test_batch_size,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n )\n\n return train_loader, test_loader\n\n\ndef read_dataset(args):\n\n if args.dataset.name == \"CIFAR10\":\n train_loader, test_loader = cifar10(args)\n elif args.dataset.name == \"Tiny-ImageNet\":\n train_loader, test_loader = tiny_imagenet(args)\n elif args.dataset.name == \"Imagenette\":\n train_loader, test_loader = imagenette(args)\n elif args.dataset.name == \"Imagenet\":\n train_loader, test_loader = imagenet(args)\n else:\n raise NotImplementedError\n\n return train_loader, test_loader\n\n\ndef read_test_dataset_from_file(args):\n\n if args.dataset.name == \"CIFAR10\":\n test_loader = cifar10_from_file(args)\n elif args.dataset.name == \"Tiny-ImageNet\":\n test_loader = tiny_imagenet_from_file(args)\n elif args.dataset.name == \"Imagenette\":\n test_loader = imagenette_from_file(args)\n else:\n raise NotImplementedError\n\n return test_loader\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.load", "torch.cuda.is_available", "numpy.max", "torch.utils.data.TensorDataset", "torch.Tensor" ] ]
man-vs-electron/datapanels
[ "d656bbd3c6071cc2d26bd27285001aee1fa7c82d" ]
[ "src/datapanels/gameoflife.py" ]
[ "\"\"\" DataPanels implementation of Conway's Game of Life\n\nSee https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life for details about\nthe game.\n\n\"\"\"\nimport random\nfrom typing import Tuple, Set, Optional, Union, List\nimport re\nimport numpy as np\nfrom kivy.lang.builder import Builder\nfrom kivy.properties import ListProperty, NumericProperty, ObjectProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.dropdown import DropDown\nfrom kivy.clock import Clock\nfrom kivy.app import App\nfrom kwidgets.uix.pixelatedgrid import PixelatedGrid\n\n\ndef translate(state: Set[Tuple[int, int]], x: int, y: int, additive: bool=False) -> Set[Tuple[int, int]]:\n \"\"\" Move the object the specified number of x and y values\n\n :param state: original state\n :param x: horizontal distance to move the object\n :param y: vertical distance to move the object\n :return: new, translated state\n \"\"\"\n return set([(t[0]+x, t[1]+y) for t in state]).union(state if additive else set())\n\n\ndef horizontal_flip(state: Set[Tuple[int, int]], additive: bool=False) -> Set[Tuple[int, int]]:\n \"\"\" Flip the object horizontally around the center\n\n :param state: original state\n :return: new modified state\n \"\"\"\n max_x = max([t[0] for t in state])\n min_x = min([t[0] for t in state])\n x_width = max_x-min_x\n\n return set([( x_width-t[0]+2*min_x, t[1] ) for t in state]).union(state if additive else set())\n\n\ndef vertical_flip(state: Set[Tuple[int, int]], additive: bool=False) -> Set[Tuple[int, int]]:\n \"\"\" Flip the object vertically around the center\n\n :param state: original state\n :return: new modified state\n \"\"\"\n max_y = max([t[1] for t in state])\n min_y = min([t[1] for t in state])\n y_width = max_y-min_y\n\n return set([( t[0], y_width-t[1]+2*min_y ) for t in state]).union(state if additive else set())\n\n\ndef rotate_90(state: Set[Tuple[int, int]], origin: Tuple[int, int], additive: bool=False) -> Set[Tuple[int, int]]:\n \"\"\" Rotate the object 90 degrees to the left with respect to the provide origin\n\n :param state: original state\n :param origin: The point of the rotation\n :return: new modified state\n \"\"\"\n return set([( origin[1]-t[1]+origin[0], t[0]-origin[0]+origin[1]) for t in state]).union(state if additive else set())\n\n\ndef multi_transform(state: Set[Tuple[int, int]], ops: List[Union[str, Tuple[str, int], Tuple[str, Tuple[int, int]]]], additive_final: bool=False) -> Set[Tuple[int, int]]:\n ans = set(state)\n for op in ops:\n if isinstance(op, tuple):\n if op[0].upper() == \"T\":\n ans = translate(ans, op[1], False)\n if op[0].upper() == \"R\":\n ans = rotate_90(ans, op[1], False)\n else:\n raise RuntimeError(\"Invalid syntax. Ops must be a list where each member is in the form (T, dist)|(R, (x,y)|H|V\")\n elif op.upper() == 'H':\n ans = horizontal_flip(ans, False)\n elif op.upper() == 'V':\n ans = vertical_flip(ans, False)\n else:\n raise RuntimeError(\"Invalid syntax. 
Ops must be a list where each member is in the form (T, dist)|(R, (x,y)|H|V\")\n return ans.union(state if additive_final else set())\n\n\ndef rle_decode(rle_text: str) -> Set[Tuple[int, int]]:\n \"\"\"\n Adapted from: https://github.com/Robert-Phan/python-conway/blob/master/main.py\n :param rle_text:\n :return:\n \"\"\"\n ans = []\n x=0\n y=0\n for g in re.findall(r\"\\d*b|\\d*o|\\d*\\$\", rle_text):\n num = 1 if len(g)==1 else int(g[:-1])\n code = g[-1]\n if code==\"$\":\n y += num\n x = 0\n if code==\"b\":\n x += num\n if code==\"o\":\n for j in range(0,num):\n ans.append((x,y))\n x += 1\n return set(ans)\n\n# Patterns taken from LifeWiki: https://conwaylife.com/wiki/Main_Page\n\nbasic_patterns = {\n \"glider\": rle_decode(\"bob$2bo$3o!\"),\n \"canada goose\": rle_decode(\"3o10b$o9b2ob$bo6b3obo$3b2o2b2o4b$4bo8b$8bo4b$4b2o3bo3b$3bobob2o4b$3bobo2bob2ob$2bo4b2o4b$2b2o9b$2b2o!\"),\n \"Copperhead\": rle_decode(\"b2o2b2o$3b2o$3b2o$obo2bobo$o6bo2$o6bo$b2o2b2o$2b4o2$3b2o$3b2o!\"),\n \"Coe Ship\": rle_decode(\"4b6o$2b2o5bo$2obo5bo$4bo3bob$6bo3b$6b2o2b$5b4ob$5b2ob2o$7b2o!\")\n}\n\n\ninitial_patterns = {\n \"R-pentomino\": ((1, 0), (0, 1), (1, 1), (1, 2), (2, 2)),\n \"Merzenich's p31\": rle_decode(\"7b2obo2bob2o7b$2o4bo2bo4bo2bo4b2o$2o5bobo4bobo5b2o$8bo6bo8b6$8bo6bo8b$2o5bobo4bobo5b2o$2o4bo2bo4bo2bo4b2o$7b2obo2bob2o!\"),\n \"68P16\": rle_decode(\"10b2o3b2o$10b2o3b2o$6bo$2o3b2o$2o2bo10bo$5bo3b2o2b2obo$5bo3bo6b2o2$2o$2o3bo7b2o$5b2o7bo3b2o$18b2o2$2b2o6bo3bo$3bob2o2b2o3bo$4bo10bo2b2o$13b2o3b2o$13bo$3b2o3b2o$3b2o3b2o!\"),\n \"65P48\": rle_decode(\"\"\"6bo3b2o$7b2ob2o$5bobo$6bo$2o2b3o5bobo2bo$2o9bob5o2$10bob4o$10bo5bo$11b2o2b2o$9bobobo3b2o$8bobo2b3o3bo$8bo2b2o4b2obo$7b2o4b3o3bo$13bo2b3o$16bo!\"\"\"),\n \"Traffic Circle\": rle_decode(\"\"\"21b2o4b2o19b$21bobo2bobo19b$23bo2bo21b$22b2o2b2o20b$21b3o2b3o19b$23bo\n2bo21b$31bo16b$30bob2o14b$34bo13b$26bo3bo2bobo12b$26bo5bo2bo12b$26bo6b\n2o13b$9b2o37b$8bo2bo10b3o3b3o17b$7bobobo36b$6b3obo15bo21b$6b3o17bo21b$\n26bo21b$12b3o33b$2o2bo16b3o24b$o2b2o5bo5bo31b$b5o4bo5bo2bo5bo17bo2b2o$\n10bo5bo2bo5bo17b2o2bo$19bo5bo7b3o6b5ob$b5o6b3o33b$o2b2o16b3o7bo5bo10b$\n2o2bo26bo5bo4b5ob$31bo5bo5b2o2bo$43bo2b2o$33b3o12b$39b2o7b$38b3o7b$37b\nob2o7b$36bobo9b$20b3o13bo2bo8b$37b2o9b$13b2o4bo2bo25b$12bo2bo32b$12bob\nobo31b$13bo2bo31b$17bo30b$14bobo31b$21bo2bo23b$19b3o2b3o21b$20b2o2b2o\n22b$21bo2bo23b$19bobo2bobo21b$19b2o4b2o!\"\"\"),\n \"Space Ship (160P10H2V0)\": rle_decode(\"\"\"7bobobo7b$6b7o6b$5bo7bo5b$b3ob3o3b3ob3ob$o17bo$bo7bo7bob$bob2o9b2obob\n2$2b3o9b3o2b$2b3o9b3o2b$5bo7bo5b$bo4bo5bo4bob$bobo11bobob$7bo3bo7b$7bo\n3bo7b$5bo2bobo2bo5b$5bo7bo5b$4bo9bo4b$4bobo5bobo4b$4b2obo3bob2o4b$5bob\n2ob2obo5b$7bobobo7b$8bobo8b$6bobobobo6b$5b2obobob2o5b$8bobo8b$7b2ob2o\n7b$4b3obobob3o4b$4bobobobobobo4b$4bo3bobo3bo4b$8bobo8b$4bo2b5o2bo4b$3b\n4o5b4o3b$b2o13b2ob$b2obo9bob2ob$2bobo4bo4bobo2b$4b2o7b2o!\"\"\"),\n \"Space Ship (Barge 2)\": rle_decode(\"\"\"14b3ob3o14b$13bo2bobo2bo13b$12bo3bobo3bo12b$7b3obo2bobobobo2bob3o7b$6b\no2bobo4bobo4bobo2bo6b$5bo3bobobobo3bobobobo3bo5b$5bo23bo5b$7bo19bo7b$\n4bobo21bobo4b$3b2obob3o13b3obob2o3b$2bobobo3bo13bo3bobobo2b$b2obo25bob\n2ob$o3bo5b2o11b2o5bo3bo2$2ob2o25b2ob2o!\"\"\"),\n}\n\n\nclass GameOfLifeEngine:\n \"\"\" Game of Life implementation\n\n This class implements the rules and mechanism for computing successive states. No graphical operations are\n conducted.\n\n The current state of the grid is stored in active_cells. 
This set of tuples contains the X,Y coordinates of all\n the live cells.\n\n The key idea here is that the state is simply the currently live cells. If a cell is going to change state, it\n either has to be a live cell or adjecent to a live cell. So only dealing with currently live cells and their\n neighbors reduces the amount of computation that has to be performed.\n\n \"\"\"\n x_max: int = 100\n y_max: int = 100\n active_cells: Set[Tuple[int, int]] = set()\n offsets: Set[Tuple[int, int]] = {(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)}\n\n def all_neighbors(self, x: int, y: int) -> List[Tuple[int, int]]:\n \"\"\" Get a list of the coordinates of all the neighbors of some cell.\n\n The coordinates (q,r) for the neighbors must be in the range (0<=q<=x_max, 0<=r<=y_max). Any coordinates\n outside this range are ignored. So if the neighbors for a cell at the edge of the board are rquested, only\n neighbors that are on the board are returned.\n\n :param x:\n :param y:\n :return: List of neighbors\n \"\"\"\n return [(x+xo, y+yo) for xo, yo in self.offsets if 0 <= (x + xo) <= self.x_max and 0 <= (y + yo) <= self.y_max]\n\n def is_active(self, x: int, y: int) -> int:\n \"\"\" Whether the indicated cell is alive\n\n :param x:\n :param y:\n :return: 1 if it is alive, 0 otherwise\n \"\"\"\n return 1 if (x, y) in self.active_cells else 0\n\n def num_active_neighbors(self, x, y) -> int:\n \"\"\" How many neighbors of (x,y) are alive\n\n :param x:\n :param y:\n :return:\n \"\"\"\n return sum([self.is_active(nx, ny) for nx, ny in self.all_neighbors(x, y)])\n\n def clear(self):\n \"\"\" Set active_cells to an empty set, indicating that no cells are alive\n\n :return:\n \"\"\"\n self.active_cells = set()\n\n def random(self, p: Union[float, int]):\n \"\"\" Set some cells randomly to being alive\n\n :param p: If in the range of 0 to 1, select that proportion of cells to make alive. 
Otherwise treat as the\n number of cells to make alive.\n :return:\n \"\"\"\n if p<1:\n numcells = int(self.x_max*self.y_max*p)\n else:\n numcells = int(p)\n self.active_cells.update(set([(np.random.randint(0, self.x_max), np.random.randint(0, self.y_max)) for _ in range(0, numcells)]))\n\n\n def step(self, x_max: Optional[int] = None, y_max: Optional[int] = None):\n \"\"\" Update the state.\n\n Run one generation.\n\n :param x_max: The largest x value for the visible board\n :param y_max: The largest y value for the visible board\n :return: In addition to setting the local active_cells variable, return the set of active cells.\n \"\"\"\n self.x_max = self.x_max if x_max is None else x_max\n self.y_max = self.y_max if y_max is None else y_max\n new_state = set()\n for c in self.active_cells:\n active_neighbors = self.num_active_neighbors(c[0], c[1])\n if active_neighbors == 2 or active_neighbors == 3:\n new_state.update([c])\n for neighbor in self.all_neighbors(c[0], c[1]):\n if neighbor not in self.active_cells and neighbor not in new_state:\n if self.num_active_neighbors(neighbor[0], neighbor[1]) == 3:\n new_state.update([neighbor])\n self.active_cells = new_state\n return self.active_cells\n\n\nBuilder.load_string('''\n<GameOfLifePanel>:\n orientation: 'vertical'\n BoxLayout:\n orientation: 'horizontal'\n size_hint: 1, None\n size: 0, 50\n Button:\n text: 'Random'\n on_press: root.new_random(.2)\n Button:\n text: 'Add 100 Random'\n on_press: root.gol.random(100)\n Button:\n text: 'Random Ship'\n on_press: root.random_small()\n Button:\n id: menu_btn\n text: 'Patterns'\n on_release: root.choose_patterns()\n PixelatedGrid:\n id: grid \n size_hint: 1,1 \n activated_color: root.activated_color\n background_color: root.background_color\n grid_color: root.grid_color\n cell_length: root.cell_length\n''')\n\n\nclass GameOfLifePanel(BoxLayout):\n \"\"\" The Kivy panel that displays the Game of Life, along with some controls.\n\n The user can randomize the screen with a button press, or add random live cells with another button press.\n\n Key Properties:\n * update_rate: number of seconds between each generation\n * random_cell_count: Either percentage of cells to make alive or the number of cells to make alive when randomizing.\n * background_color - RGBA list for the inactive cell color\n * grid_color - RGBA for the grid lines\n * activated_color - RGBA for the active cell color\n * cell_length - the length of the side of a cell (essentially cell size)\n\n \"\"\"\n gol: GameOfLifeEngine\n pattern_dropdown: ObjectProperty(None)\n activated_color = ListProperty([0, 1, 1, 1])\n background_color = ListProperty([0, 0, 0, 1])\n grid_color = ListProperty([47/255, 79/255, 79/255, 1])\n cell_length = NumericProperty(10)\n initialized = False\n update_event = None\n update_rate = NumericProperty(0.1)\n random_cell_count = NumericProperty(0.2)\n\n def __init__(self, **kwargs):\n \"\"\" Create a new GameOfLifePanel instance\n\n Creates a new engine instance.\n\n :param kwargs:\n \"\"\"\n super(GameOfLifePanel, self).__init__(**kwargs)\n self.gol = GameOfLifeEngine()\n self.pattern_dropdown = DropDown()\n for t in initial_patterns.keys():\n b = Button(text=t)\n b.size_hint_y = None\n b.height = 44\n b.bind(on_release = lambda btn: self.set_pattern(btn.text))\n self.pattern_dropdown.add_widget(b)\n\n def choose_patterns(self, *args):\n self.pattern_dropdown.open(self.ids.menu_btn)\n\n def random_small(self):\n k = np.random.choice(list(basic_patterns.keys()))\n pattern = basic_patterns[k]\n for _ in 
range(0,np.random.randint(0, 4)):\n pattern = rotate_90(pattern, (0,0))\n xt = np.random.randint(0, self.ids.grid.visible_width())\n yt = np.random.randint(0, self.ids.grid.visible_height())\n self.gol.active_cells = self.gol.active_cells.union(translate(pattern, xt, yt))\n\n\n\n def set_pattern(self, pattern_name):\n self.pattern_dropdown.select(None)\n self.gol.clear()\n pattern = initial_patterns[pattern_name]\n pattern_xt = int((self.ids.grid.visible_width()/2)-(max([p[0] for p in pattern])-min([p[0] for p in pattern]))/2)\n pattern_yt = int((self.ids.grid.visible_height()/2)-(max([p[1] for p in pattern])-min([p[1] for p in pattern]))/2)\n self.gol.active_cells = translate(initial_patterns[pattern_name], pattern_xt, pattern_yt)\n\n\n def gol_update(self, *args):\n \"\"\" Move the engine ahead one generation, passing in the current size of the grid\n\n :param args: Unused\n :return:\n \"\"\"\n new_state = self.gol.step(self.ids.grid.visible_width(), self.ids.grid.visible_height())\n self.ids.grid.activated_cells = new_state\n\n def new_random(self, p: Union[float, int], *args):\n \"\"\" Clear the grid and add a random number of living cells\n\n :param p: If in the range of 0 to 1, select that proportion of cells to make alive. Otherwise treat as the\n number of cells to make alive.\n :param args: Unused\n :return:\n \"\"\"\n self.gol.clear()\n self.gol.random(p)\n\n def dp_stop(self):\n \"\"\" Cancel the event that updates the grid.\n\n :return:\n \"\"\"\n if self.update_event is not None:\n self.update_event.cancel()\n self.update_event = None\n\n def dp_start(self):\n \"\"\" Start the event that updates the grid\n\n :return:\n \"\"\"\n if not self.initialized:\n self.gol.random(self.random_cell_count)\n self.initialized = True\n self.update_event = Clock.schedule_interval(self.gol_update, self.update_rate)\n\n\nclass GameOfLifeApp(App):\n \"\"\" For demonstration.\n\n \"\"\"\n\n def build(self):\n #panel = GameOfLifePanel()\n panel = Builder.load_string(\"\"\"\nGameOfLifePanel:\n cell_length: 5 \n\"\"\")\n panel.dp_start()\n return panel\n\n\nif __name__ == \"__main__\":\n GameOfLifeApp().run()\n" ]
[ [ "numpy.random.randint" ] ]
chensjtu/poxture
[ "f6abea1216c987f0e4c628b250054d764eaecf2e" ]
[ "models/losses.py" ]
[ "# this code is for calculate the VGGloss, which also called percetual loss.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\nfrom typing import Union\nimport math\n\nclass VGG19(nn.Module):\n \"\"\"\n Sequential(\n (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (1): ReLU(inplace)\n (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ReLU(inplace)\n (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (6): ReLU(inplace)\n (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (8): ReLU(inplace)\n (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (11): ReLU(inplace)\n (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (13): ReLU(inplace)\n (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (15): ReLU(inplace)\n (16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (17): ReLU(inplace)\n (18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (20): ReLU(inplace)\n (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (22): ReLU(inplace)\n (23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (24): ReLU(inplace)\n (25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (26): ReLU(inplace)\n (27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (29): ReLU(inplace)\n xxxx(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n xxxx(31): ReLU(inplace)\n xxxx(32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n xxxx(33): ReLU(inplace)\n xxxx(34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n xxxx(35): ReLU(inplace)\n xxxx(36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n \"\"\"\n\n def __init__(self, ckpt_path: Union[str, bool] = \"./extradata/assets/pretrains/vgg19-dcbb9e9d.pth\",\n requires_grad=False, before_relu=False):\n super(VGG19, self).__init__()\n\n if False:\n vgg_pretrained_features = models.vgg19(pretrained=False).features\n ckpt = torch.load(ckpt_path, map_location=\"cpu\")\n vgg_pretrained_features.load_state_dict(ckpt, strict=False)\n else:\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n\n # print(f\"Loading vgg19 from {ckpt_path}...\")\n\n if before_relu:\n slice_ids = [1, 6, 11, 20, 29]\n else:\n slice_ids = [2, 7, 12, 21, 30]\n\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(slice_ids[0]):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[0], slice_ids[1]):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[1], slice_ids[2]):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[2], slice_ids[3]):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[3], slice_ids[4]):\n 
self.slice5.add_module(str(x), vgg_pretrained_features[x])\n\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h_out1 = self.slice1(X)\n h_out2 = self.slice2(h_out1)\n h_out3 = self.slice3(h_out2)\n h_out4 = self.slice4(h_out3)\n h_out5 = self.slice5(h_out4)\n out = [h_out1, h_out2, h_out3, h_out4, h_out5]\n return out\n\nclass VGG16(nn.Module):\n \"\"\"\n Sequential(\n (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (1): ReLU(inplace=True)\n (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ReLU(inplace=True)\n (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (6): ReLU(inplace=True)\n (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (8): ReLU(inplace=True)\n (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (11): ReLU(inplace=True)\n (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (13): ReLU(inplace=True)\n (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (15): ReLU(inplace=True)\n (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (18): ReLU(inplace=True)\n (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (20): ReLU(inplace=True)\n (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (22): ReLU(inplace=True)\n (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (25): ReLU(inplace=True)\n xxxx(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n xxxx(27): ReLU(inplace=True)\n xxxx(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n xxxx(29): ReLU(inplace=True)\n xxxx(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n \"\"\"\n\n def __init__(self, ckpt_path=False, requires_grad=False, before_relu=False):\n super(VGG16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=True).features\n print(\"loading vgg16 ...\")\n\n if before_relu:\n slice_ids = [1, 6, 11, 18, 25]\n else:\n slice_ids = [2, 7, 12, 19, 26]\n\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(slice_ids[0]):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[0], slice_ids[1]):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[1], slice_ids[2]):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[2], slice_ids[3]):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[3], slice_ids[4]):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h_out1 = self.slice1(X)\n h_out2 = self.slice2(h_out1)\n h_out3 = self.slice3(h_out2)\n h_out4 = self.slice4(h_out3)\n h_out5 = self.slice5(h_out4)\n out = [h_out1, h_out2, h_out3, h_out4, h_out5]\n 
return out\n\nclass VGG11(nn.Module):\n \"\"\"\n Sequential(\n (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (1): ReLU(inplace=True)\n (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (4): ReLU(inplace=True)\n (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (6): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (7): ReLU(inplace=True)\n (8): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (9): ReLU(inplace=True)\n (10): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (11): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (12): ReLU(inplace=True)\n (13): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (14): ReLU(inplace=True)\n (15): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (16): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n (17): ReLU(inplace=True)\n (18): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n ###(19): ReLU(inplace=True)\n ###(20): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n\n \"\"\"\n def __init__(self, ckpt_path=False, requires_grad=False, before_relu=False):\n super(VGG11, self).__init__()\n vgg_pretrained_features = models.vgg11(pretrained=True).features\n print(\"loading vgg11 ...\")\n\n if before_relu:\n slice_ids = [1, 4, 7, 12, 17]\n else:\n slice_ids = [2, 5, 8, 13, 18]\n\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(slice_ids[0]):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[0], slice_ids[1]):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[1], slice_ids[2]):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[2], slice_ids[3]):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(slice_ids[3], slice_ids[4]):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h_out1 = self.slice1(X)\n h_out2 = self.slice2(h_out1)\n h_out3 = self.slice3(h_out2)\n h_out4 = self.slice4(h_out3)\n h_out5 = self.slice5(h_out4)\n out = [h_out1, h_out2, h_out3, h_out4, h_out5]\n return out\n\ndef gram_matrix(input):\n a, b, c, d = input.size() # a=batch size(=1)\n # b=number of feature maps\n # (c,d)=dimensions of a f. 
map (N=c*d)\n features = input.view(a * b, c * d) # resize F_XL into \\hat F_XL\n G = torch.mm(features, features.t()) # compute the gram product\n # we 'normalize' the values of the gram matrix\n # by dividing by the number of elements in each feature map.\n return G.div(a * b * c * d)\n\nclass Percetual_loss(nn.Module):\n def __init__(self, \n before_relu=False, slice_ids=(0, 1, 2, 3, 4), vgg_type=\"VGG19\", \n ckpt_path=False, resize=False, style_loss=False):\n super(Percetual_loss, self).__init__()\n self.device = torch.device('cuda')\n if vgg_type == \"VGG19\":\n self.vgg = VGG19(ckpt_path=ckpt_path, before_relu=before_relu).to(self.device)\n elif vgg_type == \"VGG16\":\n self.vgg = VGG16(ckpt_path=ckpt_path, before_relu=before_relu).to(self.device)\n else:\n self.vgg = VGG11(ckpt_path=ckpt_path, before_relu=before_relu).to(self.device)\n self.criterion = nn.L1Loss()\n # self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]\n self.weights = [1.0, 1.0, 1.0, 1.0, 1.0]\n self.slice_ids = slice_ids\n self.style_loss = style_loss\n self.resize = resize\n\n def forward(self, x, y):\n '''\n x_vgg[i]: fake img\n y_vgg[i].detach(): ground_truth\n '''\n if self.resize:\n x = F.interpolate(x, size=(224, 224), mode=\"bilinear\", align_corners=True)\n y = F.interpolate(y, size=(224, 224), mode=\"bilinear\", align_corners=True)\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n loss = 0\n style_loss = 0\n if self.style_loss:\n for i in self.slice_ids:\n style_loss += F.mse_loss(gram_matrix(x_vgg[i]), gram_matrix(y_vgg[i].detach()))\n\n for i in self.slice_ids:\n loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n\n return loss + style_loss\n\n\nclass Style_loss(nn.Module):\n # not used.\n def __init__(self, target_feature, ckpt_path=False, before_relu=False):\n super(Style_loss, self).__init__()\n self.device = torch.device('cuda')\n self.vgg = VGG19(ckpt_path=ckpt_path, before_relu=before_relu).to(self.device)\n self.target = gram_matrix(target_feature).detach()\n\n def forward(self, input):\n G = gram_matrix(input)\n self.loss = F.mse_loss(G, self.target)\n return input\n\n\nif __name__ == \"__main__\":\n # percetual_loss = Percetual_loss(ckpt_path='./extradata/assets/checkpoints/losses/vgg19-dcbb9e9d.pth')\n percetual_loss = Percetual_loss()\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg19', pretrained=True)\n # or any of these variants\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg11_bn', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg13', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg13_bn', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg16', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg16_bn', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg19', pretrained=True)\n # model = torch.hub.load('pytorch/vision:v0.6.0', 'vgg19_bn', pretrained=True)\n with torch.no_grad():\n for i in range(10):\n loss = percetual_loss(torch.zeros((4, 3, 256, 256), device=0), torch.ones((4, 3, 256, 256), device=0))\n print(loss)\n print(\"lalala\")\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.ones", "torch.load", "torch.nn.L1Loss", "torch.no_grad", "torch.nn.Sequential", "torch.zeros", "torch.device", "torch.nn.functional.interpolate" ] ]
dssg/babies-public
[ "0a03e95992bfd7b7b4c2f11b8a5e2c3961f193c6" ]
[ "webapp/proto.py" ]
[ "import pandas as pd\nimport numpy as np\nimport psycopg2\nfrom sqlalchemy import create_engine\nimport json\nimport sys\nfrom sklearn.externals import joblib\nimport os\n\ndef run_all():\n\n # connect to postgres \n params = json.load(open('/home/ipan/passwords/psql_psycopg2.password', 'r'))\n\n try:\n conn = psycopg2.connect(**params)\n conn.autocommit\n cur = conn.cursor()\n\n except:\n print('Unable to connect to database')\n\n # import from babysaver\n sys.path.insert(0, '/home/ipan/babies/')\n from babysaver import features\n from babysaver import models\n from babysaver.models import WeightedQuestions\n from sklearn.linear_model import LogisticRegression\n from babysaver import evaluation\n\n # specify dat configuration in a dictionary\n config_add1 = {'Features': None, \n 'Include 707G?': 'Y', \n '707G Questions': range(35,52), \n '707G Start Date': '2014-07-01', \n '707G End Date': None,\n 'Include 711?': 'N', \n '711 Questions': None, \n '711 Start Date': None, \n '711 End Date': None, \n 'Include FCM?': 'Y', \n 'Include BBO?': 'Y', \n 'Include other?': 'Y', \n 'Outcome': 'ADVB1_OTC'}\n\n # use config_writer to write dictionary to csv file \n features.config_writer(config_add1, '/home/ipan/configs/config_add1.csv')\n # then use that csv file to load in the data \n data_dct = features.data_getter('/home/ipan/configs/config_add1.csv', \n conn=conn, \n unique_identifier='UNI_PART_ID_I', \n impute='fill_mode',\n interactions=False)\n\n # specify hyperparameter lists\n c_list = [1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1e3, 1e4, 1e20]\n penalties = ['l2']\n class_wgts = [None, 'auto']\n\n wgt_schemes = ['odds_ratio_relative', 'odds_ratio_absolute', \n 'marginal_effects', 'positive_coefs']\n\n # specify classifier dictionaries \n expand_wgt = {'clf': WeightedQuestions,\n 'param_dict': {'C': c_list,\n 'penalty': penalties, \n 'class_weight': class_wgts,\n 'weight_scheme': wgt_schemes,\n 'round_dec': [1]\n }\n }\n\n simple_wgt = {'clf': WeightedQuestions,\n 'param_dict': {'C': c_list,\n 'penalty': penalties, \n 'class_weight': class_wgts,\n 'weight_scheme': wgt_schemes,\n 'round_dec': [0]\n }\n }\n\n\n log_lib = {'clf': LogisticRegression,\n 'param_dict': {'C': c_list,\n 'penalty': penalties,\n 'class_weight': class_wgts\n }\n }\n\n # specify list of k for precision at k\n k_list = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3]\n\n # train a bunch of classifiers for each type of classifier\n # I wanted to find the best one of each, so I did each one separately\n expand_evals, expand_pkls = models.machine_learner(data_dct, \n clf_library=expand_wgt,\n pkl_folder='e_pkls',\n cv='kfold_cv',\n k=k_list,\n n_folds=10)\n\n simple_evals, simple_pkls = models.machine_learner(data_dct,\n clf_library=simple_wgt,\n pkl_folder='s_pkls',\n cv='kfold_cv',\n k=k_list,\n n_folds=10)\n\n log_evals, log_pkls = models.machine_learner(data_dct,\n clf_library=log_lib,\n pkl_folder='log_pkls',\n cv='kfold_cv',\n k=k_list,\n n_folds=10)\n\n # concatenate all the dataframes into one dataframe using\n # output of machine learner \n expand_df = evaluation.dict_to_dataframe(expand_evals, expand_pkls)\n simple_df = evaluation.dict_to_dataframe(simple_evals, simple_pkls)\n log_df = evaluation.dict_to_dataframe(log_evals, log_pkls)\n\n # metric(s) to sort classifiers by \n sort_metrics = ['precision at 0.1 mean', 'precision at 0.15 mean']\n # mapping between question number and text\n map_file = '/home/ipan/707G_question_map.csv'\n\n # get a dataframe with weights and question text\n expand_wgts = evaluation.weight_mapper(data_dct, 
expand_df, \n sort_metrics, map_file, '707G')\n expand_wgts.columns = ['QID', 'Question', 'Expanded Weights']\n simple_wgts = evaluation.weight_mapper(data_dct, simple_df,\n sort_metrics, map_file, '707G')\n simple_wgts.columns = ['QID', 'Question', 'Simple Weights']\n log_wgts = evaluation.weight_mapper(data_dct, log_df, sort_metrics,\n map_file, '707G')\n \n all_wgts = log_wgts.join([expand_wgts['Expanded Weights'], \n simple_wgts['Simple Weights']])\n\n # load in the top-ranked model of each type (positional .iloc[0], since\n # label-based [0] would fetch the row originally indexed 0, not the best one)\n log_df = log_df.sort_values(sort_metrics, ascending=False)\n log_model = joblib.load(log_df['pickle_file'].iloc[0])\n ew_model = joblib.load(expand_df.sort_values(sort_metrics, ascending=False)['pickle_file'].iloc[0])\n sw_model = joblib.load(simple_df.sort_values(sort_metrics, ascending=False)['pickle_file'].iloc[0])\n\n df = data_dct['dataframe']\n feats = data_dct['features']\n log_scores = log_model.predict_proba(df[feats])[:,1]\n pd.DataFrame({'scores': log_scores}).to_csv('scores.csv', index=False)\n # calculate overall rate of adverse births\n baseline_rate = np.round(df[data_dct['outcome']].mean()*100,1)\n\n # calculate scores \n ew_scores = ew_model.predict_proba(df[feats])[:,1]\n sw_scores = sw_model.predict_proba(df[feats])[:,1]\n\n # get metrics for various values of k\n expand_mets = evaluation.metrics_getter(data_dct, expand_df,\n sort_metrics, map_file,\n k_list, ew_scores)\n simple_mets = evaluation.metrics_getter(data_dct, simple_df,\n sort_metrics, map_file, \n k_list, sw_scores)\n log_mets = evaluation.metrics_getter(data_dct, log_df,\n sort_metrics, map_file,\n k_list, log_scores, scale=True)\n\n if not os.path.exists('best_pkl/'):\n os.makedirs('best_pkl/')\n\n # pickle the best logistic regression model for webapp prediction tool\n joblib.dump(log_model, 'best_pkl/best_model.pkl')\n\n return evaluation.weight_html(all_wgts), log_mets.to_html(), expand_mets.to_html(), simple_mets.to_html(), baseline_rate\n" ]
[ [ "sklearn.externals.joblib.dump", "pandas.DataFrame", "sklearn.externals.joblib.load" ] ]
MingSungChao/IPN-hand
[ "0b061e4438f159e3e312af4959cb424917b5c367" ]
[ "test.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport time\nimport os\nimport sys\nimport json\nimport pdb\n\nfrom utils import AverageMeter\n\n\ndef calculate_video_results(output_buffer, video_id, test_results, class_names):\n video_outputs = torch.stack(output_buffer)\n average_scores = torch.mean(video_outputs, dim=0)\n sorted_scores, locs = torch.topk(average_scores, k=2)\n\n video_results = []\n for i in range(sorted_scores.size(0)):\n video_results.append({\n 'label': class_names[locs[i].item()],\n 'score': sorted_scores[i].item()\n })\n test_results['results'][video_id] = video_results\n\n\ndef test(data_loader, model, opt, class_names):\n print('test')\n\n model.eval()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n end_time = time.time()\n output_buffer = []\n previous_video_id = ''\n test_results = {'results': {}}\n for i, (inputs, targets) in enumerate(data_loader):\n data_time.update(time.time() - end_time)\n \n \n with torch.no_grad():\n inputs = Variable(inputs)\n targets = Variable(targets)\n outputs = model(inputs)\n\n if not opt.no_softmax_in_test:\n outputs = F.softmax(outputs)\n\n for j in range(outputs.size(0)):\n if not (i == 0 and j == 0) and targets[j].item() != previous_video_id:\n calculate_video_results(output_buffer, previous_video_id,\n test_results, class_names)\n output_buffer = []\n output_buffer.append(outputs[j].data.cpu())\n previous_video_id = targets[j].item()\n\n if (i % 100) == 0:\n with open(\n os.path.join(opt.result_path, '{}.json'.format(\n opt.test_subset)), 'w') as f:\n json.dump(test_results, f)\n\n batch_time.update(time.time() - end_time)\n end_time = time.time()\n\n print('[{}/{}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'.format(\n i + 1,\n len(data_loader),\n batch_time=batch_time,\n data_time=data_time))\n with open(\n os.path.join(opt.result_path, '{}.json'.format(opt.test_subset)),\n 'w') as f:\n json.dump(test_results, f)\n" ]
[ [ "torch.stack", "torch.nn.functional.softmax", "torch.autograd.Variable", "torch.no_grad", "torch.topk", "torch.mean" ] ]
mehrdad-shokri/astropy
[ "abd73b51277694338c8eca7639da956dcd06f207" ]
[ "astropy/utils/data_info.py" ]
[ "# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"This module contains functions and methods that relate to the DataInfo class\nwhich provides a container for informational attributes as well as summary info\nmethods.\n\nA DataInfo object is attached to the Quantity, SkyCoord, and Time classes in\nastropy. Here it allows those classes to be used in Tables and uniformly carry\ntable column attributes such as name, format, dtype, meta, and description.\n\"\"\"\n\n# Note: these functions and classes are tested extensively in astropy table\n# tests via their use in providing mixin column info, and in\n# astropy/tests/test_info for providing table and column info summary data.\n\n\nimport os\nimport re\nimport sys\nimport weakref\nimport warnings\nfrom io import StringIO\nfrom copy import deepcopy\nfrom functools import partial\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom . import metadata\n\n\n__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',\n 'DataInfo', 'MixinInfo', 'ParentDtypeInfo']\n\n# Tuple of filterwarnings kwargs to ignore when calling info\nIGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'\n 'Mean of empty slice|Degrees of freedom <= 0|'\n 'invalid value encountered in sqrt'),)\n\nSTRING_TYPE_NAMES = {(False, 'S'): 'str', # not PY3\n (False, 'U'): 'unicode',\n (True, 'S'): 'bytes', # PY3\n (True, 'U'): 'str'}\n\n\n@contextmanager\ndef serialize_context_as(context):\n \"\"\"Set context for serialization.\n\n This will allow downstream code to understand the context in which a column\n is being serialized. Objects like Time or SkyCoord will have different\n default serialization representations depending on context.\n\n Parameters\n ----------\n context : str\n Context name, e.g. 'fits', 'hdf5', 'ecsv', 'yaml'\n \"\"\"\n old_context = BaseColumnInfo._serialize_context\n BaseColumnInfo._serialize_context = context\n yield\n BaseColumnInfo._serialize_context = old_context\n\n\ndef dtype_info_name(dtype):\n \"\"\"Return a human-oriented string name of the ``dtype`` arg.\n This can be use by astropy methods that present type information about\n a data object.\n\n The output is mostly equivalent to ``dtype.name`` which takes the form\n <type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an\n optional number of bits which gets included only for numeric types.\n\n For bytes, string and unicode types, the output is shown below, where <N>\n is the number of characters. This representation corresponds to the Python\n type that matches the dtype::\n\n Numpy S<N> U<N>\n Python bytes<N> str<N>\n\n Parameters\n ----------\n dtype : str, np.dtype, type\n Input dtype as an object that can be converted via np.dtype()\n\n Returns\n -------\n dtype_info_name : str\n String name of ``dtype``\n \"\"\"\n dtype = np.dtype(dtype)\n if dtype.kind in ('S', 'U'):\n length = re.search(r'(\\d+)', dtype.str).group(1)\n type_name = STRING_TYPE_NAMES[(True, dtype.kind)]\n out = type_name + length\n else:\n out = dtype.name\n\n return out\n\n\ndef data_info_factory(names, funcs):\n \"\"\"\n Factory to create a function that can be used as an ``option``\n for outputting data object summary information.\n\n Examples\n --------\n >>> from astropy.utils.data_info import data_info_factory\n >>> from astropy.table import Column\n >>> c = Column([4., 3., 2., 1.])\n >>> mystats = data_info_factory(names=['min', 'median', 'max'],\n ... 
funcs=[np.min, np.median, np.max])\n >>> c.info(option=mystats)\n min = 1.0\n median = 2.5\n max = 4.0\n n_bad = 0\n length = 4\n\n Parameters\n ----------\n names : list\n List of information attribute names\n funcs : list\n List of functions that compute the corresponding information attribute\n\n Returns\n -------\n func : function\n Function that can be used as a data info option\n \"\"\"\n def func(dat):\n outs = []\n for name, func in zip(names, funcs):\n try:\n if isinstance(func, str):\n out = getattr(dat, func)()\n else:\n out = func(dat)\n except Exception:\n outs.append('--')\n else:\n outs.append(str(out))\n\n return OrderedDict(zip(names, outs))\n return func\n\n\ndef _get_obj_attrs_map(obj, attrs):\n \"\"\"\n Get the values for object ``attrs`` and return as a dict. This\n ignores any attributes that are None and in Py2 converts any unicode\n attribute names or values to str. In the context of serializing the\n supported core astropy classes this conversion will succeed and results\n in more succinct and less python-specific YAML.\n \"\"\"\n out = {}\n for attr in attrs:\n val = getattr(obj, attr, None)\n\n if val is not None:\n out[attr] = val\n return out\n\n\ndef _get_data_attribute(dat, attr=None):\n \"\"\"\n Get a data object attribute for the ``attributes`` info summary method\n \"\"\"\n if attr == 'class':\n val = type(dat).__name__\n elif attr == 'dtype':\n val = dtype_info_name(dat.info.dtype)\n elif attr == 'shape':\n datshape = dat.shape[1:]\n val = datshape if datshape else ''\n else:\n val = getattr(dat.info, attr)\n if val is None:\n val = ''\n return str(val)\n\n\nclass InfoAttribute:\n def __init__(self, attr, default=None):\n self.attr = attr\n self.default = default\n\n def __get__(self, instance, owner_cls):\n if instance is None:\n return self\n\n return instance._attrs.get(self.attr, self.default)\n\n def __set__(self, instance, value):\n if instance is None:\n # This is an unbound descriptor on the class\n raise ValueError('cannot set unbound descriptor')\n\n instance._attrs[self.attr] = value\n\n\nclass ParentAttribute:\n def __init__(self, attr):\n self.attr = attr\n\n def __get__(self, instance, owner_cls):\n if instance is None:\n return self\n\n return getattr(instance._parent, self.attr)\n\n def __set__(self, instance, value):\n if instance is None:\n # This is an unbound descriptor on the class\n raise ValueError('cannot set unbound descriptor')\n\n setattr(instance._parent, self.attr, value)\n\n\nclass DataInfoMeta(type):\n def __new__(mcls, name, bases, dct):\n # Ensure that we do not gain a __dict__, which would mean\n # arbitrary attributes could be set.\n dct.setdefault('__slots__', [])\n return super().__new__(mcls, name, bases, dct)\n\n def __init__(cls, name, bases, dct):\n super().__init__(name, bases, dct)\n\n # Define default getters/setters for attributes, if needed.\n for attr in cls.attr_names:\n if attr not in dct:\n # If not defined explicitly for this class, did any of\n # its superclasses define it, and, if so, was this an\n # automatically defined look-up-on-parent attribute?\n cls_attr = getattr(cls, attr, None)\n if attr in cls.attrs_from_parent:\n # If the attribute is supposed to be stored on the parent,\n # and that is stated by this class yet it was not the case\n # on the superclass, override it.\n if 'attrs_from_parent' in dct and not isinstance(cls_attr, ParentAttribute):\n setattr(cls, attr, ParentAttribute(attr))\n elif not cls_attr or isinstance(cls_attr, ParentAttribute):\n # If the attribute is not meant to be 
stored on the parent,\n # and if it was not defined already or was previously defined\n # as an attribute on the parent, define a regular\n # look-up-on-info attribute\n setattr(cls, attr,\n InfoAttribute(attr, cls._attr_defaults.get(attr)))\n\n\nclass DataInfo(metaclass=DataInfoMeta):\n \"\"\"\n Descriptor that data classes use to add an ``info`` attribute for storing\n data attributes in a uniform and portable way. Note that it *must* be\n called ``info`` so that the DataInfo() object can be stored in the\n ``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,\n Python doesn't use __dict__['x'] normally, and the descriptor can safely\n store stuff there. Thanks to http://nbviewer.ipython.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb\n for this trick that works for non-hashable classes.\n\n Parameters\n ----------\n bound : bool\n If True this is a descriptor attribute in a class definition, else it\n is a DataInfo() object that is bound to a data object instance. Default is False.\n \"\"\"\n _stats = ['mean', 'std', 'min', 'max']\n attrs_from_parent = set()\n attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta'])\n _attr_defaults = {'dtype': np.dtype('O')}\n _attrs_no_copy = set()\n _info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class')\n __slots__ = ['_parent_cls', '_parent_ref', '_attrs']\n # This specifies the list of object attributes which must be stored in\n # order to re-create the object after serialization. This is independent\n # of normal `info` attributes like name or description. Subclasses will\n # generally either define this statically (QuantityInfo) or dynamically\n # (SkyCoordInfo). These attributes may be scalars or arrays. If arrays\n # that match the object length they will be serialized as an independent\n # column.\n _represent_as_dict_attrs = ()\n\n # This specifies attributes which are to be provided to the class\n # initializer as ordered args instead of keyword args. This is needed\n # for Quantity subclasses where the keyword for data varies (e.g.\n # between Quantity and Angle).\n _construct_from_dict_args = ()\n\n # This specifies the name of an attribute which is the \"primary\" data.\n # Then when representing as columns\n # (table.serialize._represent_mixin_as_column) the output for this\n # attribute will be written with the just name of the mixin instead of the\n # usual \"<name>.<attr>\".\n _represent_as_dict_primary_data = None\n\n def __init__(self, bound=False):\n # If bound to a data object instance then create the dict of attributes\n # which stores the info attribute values. Default of None for \"unset\"\n # except for dtype where the default is object.\n if bound:\n self._attrs = {}\n\n @property\n def _parent(self):\n try:\n parent = self._parent_ref()\n except AttributeError:\n return None\n\n if parent is None:\n raise AttributeError(\"\"\"\\\nfailed access \"info\" attribute on a temporary object.\n\nIt looks like you have done something like ``col[3:5].info``, i.e.\nyou accessed ``info`` from a temporary slice object ``col[3:5]`` that\nonly exists momentarily. This has failed because the reference to\nthat temporary object is now lost. 
Instead force a permanent\nreference with ``c = col[3:5]`` followed by ``c.info``.\"\"\")\n\n return parent\n\n def __get__(self, instance, owner_cls):\n if instance is None:\n # This is an unbound descriptor on the class\n self._parent_cls = owner_cls\n return self\n\n info = instance.__dict__.get('info')\n if info is None:\n info = instance.__dict__['info'] = self.__class__(bound=True)\n # We set _parent_ref on every call, since if one makes copies of\n # instances, 'info' will be copied as well, which will lose the\n # reference.\n info._parent_ref = weakref.ref(instance)\n return info\n\n def __set__(self, instance, value):\n if instance is None:\n # This is an unbound descriptor on the class\n raise ValueError('cannot set unbound descriptor')\n\n if isinstance(value, DataInfo):\n info = instance.__dict__['info'] = self.__class__(bound=True)\n for attr in info.attr_names - info.attrs_from_parent - info._attrs_no_copy:\n info._attrs[attr] = deepcopy(getattr(value, attr))\n\n else:\n raise TypeError('info must be set with a DataInfo instance')\n\n def __getstate__(self):\n return self._attrs\n\n def __setstate__(self, state):\n self._attrs = state\n\n def _represent_as_dict(self, attrs=None):\n \"\"\"Get the values for the parent ``attrs`` and return as a dict.\n\n By default, uses '_represent_as_dict_attrs'.\n \"\"\"\n if attrs is None:\n attrs = self._represent_as_dict_attrs\n return _get_obj_attrs_map(self._parent, attrs)\n\n def _construct_from_dict(self, map):\n args = [map.pop(attr) for attr in self._construct_from_dict_args]\n return self._parent_cls(*args, **map)\n\n info_summary_attributes = staticmethod(\n data_info_factory(names=_info_summary_attrs,\n funcs=[partial(_get_data_attribute, attr=attr)\n for attr in _info_summary_attrs]))\n\n # No nan* methods in numpy < 1.8\n info_summary_stats = staticmethod(\n data_info_factory(names=_stats,\n funcs=[getattr(np, 'nan' + stat)\n for stat in _stats]))\n\n def __call__(self, option='attributes', out=''):\n \"\"\"\n Write summary information about data object to the ``out`` filehandle.\n By default this prints to standard output via sys.stdout.\n\n The ``option`` argument specifies what type of information\n to include. This can be a string, a function, or a list of\n strings or functions. Built-in options are:\n\n - ``attributes``: data object attributes like ``dtype`` and ``format``\n - ``stats``: basic statistics: min, mean, and max\n\n If a function is specified then that function will be called with the\n data object as its single argument. The function must return an\n OrderedDict containing the information attributes.\n\n If a list is provided then the information attributes will be\n appended for each of the options, in order.\n\n Examples\n --------\n\n >>> from astropy.table import Column\n >>> c = Column([1, 2], unit='m', dtype='int32')\n >>> c.info()\n dtype = int32\n unit = m\n class = Column\n n_bad = 0\n length = 2\n\n >>> c.info(['attributes', 'stats'])\n dtype = int32\n unit = m\n class = Column\n mean = 1.5\n std = 0.5\n min = 1\n max = 2\n n_bad = 0\n length = 2\n\n Parameters\n ----------\n option : str, function, list of (str or function)\n Info option, defaults to 'attributes'.\n out : file-like object, None\n Output destination, defaults to sys.stdout. 
If None then the\n OrderedDict with information attributes is returned\n\n Returns\n -------\n info : OrderedDict if out==None else None\n \"\"\"\n if out == '':\n out = sys.stdout\n\n dat = self._parent\n info = OrderedDict()\n name = dat.info.name\n if name is not None:\n info['name'] = name\n\n options = option if isinstance(option, (list, tuple)) else [option]\n for option in options:\n if isinstance(option, str):\n if hasattr(self, 'info_summary_' + option):\n option = getattr(self, 'info_summary_' + option)\n else:\n raise ValueError('option={} is not an allowed information type'\n .format(option))\n\n with warnings.catch_warnings():\n for ignore_kwargs in IGNORE_WARNINGS:\n warnings.filterwarnings('ignore', **ignore_kwargs)\n info.update(option(dat))\n\n if hasattr(dat, 'mask'):\n n_bad = np.count_nonzero(dat.mask)\n else:\n try:\n n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))\n except Exception:\n n_bad = 0\n info['n_bad'] = n_bad\n\n try:\n info['length'] = len(dat)\n except (TypeError, IndexError):\n pass\n\n if out is None:\n return info\n\n for key, val in info.items():\n if val != '':\n out.write(f'{key} = {val}' + os.linesep)\n\n def __repr__(self):\n if self._parent is None:\n return super().__repr__()\n\n out = StringIO()\n self.__call__(out=out)\n return out.getvalue()\n\n\nclass BaseColumnInfo(DataInfo):\n \"\"\"\n Base info class for anything that can be a column in an astropy\n Table. There are at least two classes that inherit from this:\n\n ColumnInfo: for native astropy Column / MaskedColumn objects\n MixinInfo: for mixin column objects\n\n Note that this class is defined here so that mixins can use it\n without importing the table package.\n \"\"\"\n attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])\n _attrs_no_copy = set(['parent_table'])\n\n # Context for serialization. This can be set temporarily via\n # ``serialize_context_as(context)`` context manager to allow downstream\n # code to understand the context in which a column is being serialized.\n # Typical values are 'fits', 'hdf5', 'ecsv', 'yaml'. Objects like Time or\n # SkyCoord will have different default serialization representations\n # depending on context.\n _serialize_context = None\n __slots__ = ['_format_funcs', '_copy_indices']\n\n @property\n def parent_table(self):\n value = self._attrs.get('parent_table')\n if callable(value):\n value = value()\n return value\n\n @parent_table.setter\n def parent_table(self, parent_table):\n if parent_table is None:\n self._attrs.pop('parent_table', None)\n else:\n parent_table = weakref.ref(parent_table)\n self._attrs['parent_table'] = parent_table\n\n def __init__(self, bound=False):\n super().__init__(bound=bound)\n\n # If bound to a data object instance then add a _format_funcs dict\n # for caching functions for print formatting.\n if bound:\n self._format_funcs = {}\n\n def iter_str_vals(self):\n \"\"\"\n This is a mixin-safe version of Column.iter_str_vals.\n \"\"\"\n col = self._parent\n if self.parent_table is None:\n from astropy.table.column import FORMATTER as formatter\n else:\n formatter = self.parent_table.formatter\n\n _pformat_col_iter = formatter._pformat_col_iter\n for str_val in _pformat_col_iter(col, -1, False, False, {}):\n yield str_val\n\n def adjust_indices(self, index, value, col_len):\n '''\n Adjust info indices after column modification.\n\n Parameters\n ----------\n index : slice, int, list, or ndarray\n Element(s) of column to modify. 
This parameter can\n be a single row number, a list of row numbers, an\n ndarray of row numbers, a boolean ndarray (a mask),\n or a column slice.\n value : int, list, or ndarray\n New value(s) to insert\n col_len : int\n Length of the column\n '''\n if not self.indices:\n return\n\n if isinstance(index, slice):\n # run through each key in slice\n t = index.indices(col_len)\n keys = list(range(*t))\n elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':\n # boolean mask\n keys = np.where(index)[0]\n else: # single int\n keys = [index]\n\n value = np.atleast_1d(value) # turn array(x) into array([x])\n if value.size == 1:\n # repeat single value\n value = list(value) * len(keys)\n\n for key, val in zip(keys, value):\n for col_index in self.indices:\n col_index.replace(key, self.name, val)\n\n def slice_indices(self, col_slice, item, col_len):\n '''\n Given a sliced object, modify its indices\n to correctly represent the slice.\n\n Parameters\n ----------\n col_slice : Column or mixin\n Sliced object\n item : slice, list, or ndarray\n Slice used to create col_slice\n col_len : int\n Length of original object\n '''\n from astropy.table.sorted_array import SortedArray\n if not getattr(self, '_copy_indices', True):\n # Necessary because MaskedArray will perform a shallow copy\n col_slice.info.indices = []\n return col_slice\n elif isinstance(item, slice):\n col_slice.info.indices = [x[item] for x in self.indices]\n elif self.indices:\n if isinstance(item, np.ndarray) and item.dtype.kind == 'b':\n # boolean mask\n item = np.where(item)[0]\n threshold = 0.6\n # Empirical testing suggests that recreating a BST/RBT index is\n # more effective than relabelling when less than ~60% of\n # the total number of rows are involved, and is in general\n # more effective for SortedArray.\n small = len(item) <= 0.6 * col_len\n col_slice.info.indices = []\n for index in self.indices:\n if small or isinstance(index, SortedArray):\n new_index = index.get_slice(col_slice, item)\n else:\n new_index = deepcopy(index)\n new_index.replace_rows(item)\n col_slice.info.indices.append(new_index)\n\n return col_slice\n\n @staticmethod\n def merge_cols_attributes(cols, metadata_conflicts, name, attrs):\n \"\"\"\n Utility method to merge and validate the attributes ``attrs`` for the\n input table columns ``cols``.\n\n Note that ``dtype`` and ``shape`` attributes are handled specially.\n These should not be passed in ``attrs`` but will always be in the\n returned dict of merged attributes.\n\n Parameters\n ----------\n cols : list\n List of input Table column objects\n metadata_conflicts : str ('warn'|'error'|'silent')\n How to handle metadata conflicts\n name : str\n Output column name\n attrs : list\n List of attribute names to be merged\n\n Returns\n -------\n attrs : dict of merged attributes\n\n \"\"\"\n from astropy.table.np_utils import TableMergeError\n\n def warn_str_func(key, left, right):\n out = (\"In merged column '{}' the '{}' attribute does not match \"\n \"({} != {}). 
Using {} for merged output\"\n .format(name, key, left, right, right))\n return out\n\n def getattrs(col):\n return {attr: getattr(col.info, attr) for attr in attrs\n if getattr(col.info, attr, None) is not None}\n\n out = getattrs(cols[0])\n for col in cols[1:]:\n out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts,\n warn_str_func=warn_str_func)\n\n # Output dtype is the superset of all dtypes in in_cols\n out['dtype'] = metadata.common_dtype(cols)\n\n # Make sure all input shapes are the same\n uniq_shapes = set(col.shape[1:] for col in cols)\n if len(uniq_shapes) != 1:\n raise TableMergeError('columns have different shapes')\n out['shape'] = uniq_shapes.pop()\n\n # \"Merged\" output name is the supplied name\n if name is not None:\n out['name'] = name\n\n return out\n\n def get_sortable_arrays(self):\n \"\"\"\n Return a list of arrays which can be lexically sorted to represent\n the order of the parent column.\n\n The base method raises NotImplementedError and must be overridden.\n\n Returns\n -------\n arrays : list of ndarray\n \"\"\"\n raise NotImplementedError(f'column {self.name} is not sortable')\n\n\nclass MixinInfo(BaseColumnInfo):\n\n @property\n def name(self):\n return self._attrs.get('name')\n\n @name.setter\n def name(self, name):\n # For mixin columns that live within a table, rename the column in the\n # table when setting the name attribute. This mirrors the same\n # functionality in the BaseColumn class.\n if self.parent_table is not None:\n from astropy.table.np_utils import fix_column_name\n new_name = fix_column_name(name) # Ensure col name is numpy compatible\n self.parent_table.columns._rename_column(self.name, new_name)\n\n self._attrs['name'] = name\n\n\nclass ParentDtypeInfo(MixinInfo):\n \"\"\"Mixin that gets info.dtype from parent\"\"\"\n\n attrs_from_parent = set(['dtype']) # dtype and unit taken from parent\n" ]
[ [ "numpy.dtype", "numpy.isinf", "numpy.count_nonzero", "numpy.atleast_1d", "numpy.isnan", "numpy.where" ] ]
zchvsre/TreeCorr
[ "825dc0a9d4754f9d98ebcf9c26dee9597915d650" ]
[ "treecorr/binnedcorr3.py" ]
[ "# Copyright (c) 2003-2019 by Mike Jarvis\n#\n# TreeCorr is free software: redistribution and use in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions, and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the disclaimer given in the documentation\n# and/or other materials provided with the distribution.\n\n\"\"\"\n.. module:: binnedcorr3\n\"\"\"\n\nimport math\nimport numpy as np\nimport sys\nimport coord\nimport treecorr\n\nclass BinnedCorr3(object):\n \"\"\"This class stores the results of a 3-point correlation calculation, along with some\n ancillary data.\n\n This is a base class that is not intended to be constructed directly. But it has a few\n helper functions that derived classes can use to help perform their calculations. See\n the derived classes for more details:\n\n - `NNNCorrelation` handles count-count-count correlation functions\n - `KKKCorrelation` handles kappa-kappa-kappa correlation functions\n - `GGGCorrelation` handles gamma-gamma-gamma correlation functions\n\n Three-point correlations are a bit more complicated than two-point, since the data need\n to be binned in triangles, not just the separation between two points. We characterize the\n triangles according to the following three parameters based on the three side lenghts\n of the triangle with d1 >= d2 >= d3.\n\n .. math::\n r &= d2 \\\\\\\\\n u &= \\\\frac{d3}{d2} \\\\\\\\\n v &= \\\\pm \\\\frac{(d1 - d2)}{d3} \\\\\\\\\n\n The orientation of the triangle is specified by the sign of v.\n Positive v triangles have the three sides d1,d2,d3 in counter-clockwise orientation.\n Negative v triangles have the three sides d1,d2,d3 in clockwise orientation.\n\n .. note::\n We always bin the same way for positive and negative v values, and the binning\n specification for v should just be for the positive values. E.g. if you specify\n min_v=0.2, max_v=0.6, then TreeCorr will also accumulate triangles with\n -0.6 < v < -0.2 in addition to those with 0.2 < v < 0.6.\n\n The constructor for all derived classes take a config dict as the first argument,\n since this is often how we keep track of parameters, but if you don't want to\n use one or if you want to change some parameters from what are in a config dict,\n then you can use normal kwargs, which take precedence over anything in the config dict.\n\n There are only two implemented definitions for the distance between two points for\n three-point corretions:\n\n - 'Euclidean' = straight line Euclidean distance between two points. For spherical\n coordinates (ra,dec without r), this is the chord distance between points on the\n unit sphere.\n - 'Arc' = the true great circle distance for spherical coordinates.\n - 'Periodic' = Like Euclidean, but with periodic boundaries. Note that the triangles\n for three-point correlations can become ambiguous if d1 > period/2, which means\n the maximum d2 (max_sep) should be less than period/4. This is not enforced.\n\n Similarly, we have so far only implemented one binning type for three-point correlations.\n\n - 'LogRUV' - The bin steps will be uniform in log(r) from log(min_sep) .. log(max_sep).\n The u and v values are binned linearly from min_u .. max_u and min_v .. 
max_v.\n\n\n Parameters:\n config (dict): A configuration dict that can be used to pass in the below kwargs if\n desired. This dict is allowed to have addition entries in addition\n to those listed below, which are ignored here. (default: None)\n logger: If desired, a logger object for logging. (default: None, in which case\n one will be built according to the config dict's verbose level.)\n\n Keyword Arguments:\n\n nbins (int): How many bins to use. (Exactly three of nbins, bin_size, min_sep,\n max_sep are required. If nbins is not given, it will be calculated from\n the values of the other three, rounding up to the next highest integer.\n In this case, bin_size will be readjusted to account for this rounding\n up.)\n bin_size (float): The width of the bins in log(separation). (Exactly three of nbins,\n bin_size, min_sep, max_sep are required. If bin_size is not given, it\n will be calculated from the values of the other three.)\n min_sep (float): The minimum separation in units of sep_units, if relevant. (Exactly\n three of nbins, bin_size, min_sep, max_sep are required. If min_sep is\n not given, it will be calculated from the values of the other three.)\n max_sep (float): The maximum separation in units of sep_units, if relevant. (Exactly\n three of nbins, bin_size, min_sep, max_sep are required. If max_sep is\n not given, it will be calculated from the values of the other three.\n\n sep_units (str): The units to use for the separation values, given as a string. This\n includes both min_sep and max_sep above, as well as the units of the\n output distance values. Valid options are arcsec, arcmin, degrees,\n hours, radians. (default: radians if angular units make sense, but for\n 3-d or flat 2-d positions, the default will just match the units of\n x,y[,z] coordinates)\n bin_slop (float): How much slop to allow in the placement of pairs in the bins.\n If bin_slop = 1, then the bin into which a particular pair is placed\n may be incorrect by at most 1.0 bin widths. (default: None, which\n means to use a bin_slop that gives a maximum error of 10% on any bin,\n which has been found to yield good results for most application.\n\n nubins (int): Analogous to nbins for the u values. (The default is to calculate from\n ubin_size = binsize, min_u = 0, max_u = 1, but this can be overridden\n by specifying up to 3 of these four parametes.)\n ubin_size (float): Analogous to bin_size for the u values. (default: bin_size)\n min_u (float): Analogous to min_sep for the u values. (default: 0)\n max_u (float): Analogous to max_sep for the u values. (default: 1)\n\n nvbins (int): Analogous to nbins for the positive v values. (The default is to\n calculate from vbin_size = binsize, min_v = 0, max_v = 1, but this can\n be overridden by specifying up to 3 of these four parametes.)\n vbin_size (float): Analogous to bin_size for the v values. (default: bin_size)\n min_v (float): Analogous to min_sep for the positive v values. (default: 0)\n max_v (float): Analogous to max_sep for the positive v values. (default: 1)\n\n brute (bool): Whether to use the \"brute force\" algorithm. 
(default: False) Options\n are:\n\n - False (the default): Stop at non-leaf cells whenever the error in\n the separation is compatible with the given bin_slop.\n - True: Go to the leaves for both catalogs.\n - 1: Always go to the leaves for cat1, but stop at non-leaf cells of\n cat2 when the error is compatible with the given bin_slop.\n - 2: Always go to the leaves for cat2, but stop at non-leaf cells of\n cat1 when the error is compatible with the given bin_slop.\n\n verbose (int): If no logger is provided, this will optionally specify a logging level\n to use:\n\n - 0 means no logging output\n - 1 means to output warnings only (default)\n - 2 means to output various progress information\n - 3 means to output extensive debugging information\n\n log_file (str): If no logger is provided, this will specify a file to write the logging\n output. (default: None; i.e. output to standard output)\n output_dots (boo): Whether to output progress dots during the calcualtion of the\n correlation function. (default: False unless verbose is given and >= 2,\n in which case True)\n\n split_method (str): How to split the cells in the tree when building the tree structure.\n Options are:\n\n - mean = Use the arithmetic mean of the coordinate being split.\n (default)\n - median = Use the median of the coordinate being split.\n - middle = Use the middle of the range; i.e. the average of the minimum\n and maximum value.\n - random: Use a random point somewhere in the middle two quartiles of\n the range.\n\n min_top (int): The minimum number of top layers to use when setting up the field.\n (default: 3)\n max_top (int): The maximum number of top layers to use when setting up the field.\n The top-level cells are where each calculation job starts. There will\n typically be of order 2^max_top top-level cells. (default: 10)\n precision (int): The precision to use for the output values. This specifies how many\n digits to write. (default: 4)\n\n metric (str): Which metric to use for distance measurements. Options are listed\n above. (default: 'Euclidean')\n bin_type (str): What type of binning should be used. Options are listed above.\n (default: 'LogRUV')\n min_rpar (float): Not currently supported for 3 point correlation. (default: None)\n max_rpar (float): Not currently supported for 3 point correlation. (default: None)\n period (float): For the 'Periodic' metric, the period to use in all directions.\n (default: None)\n xperiod (float): For the 'Periodic' metric, the period to use in the x direction.\n (default: period)\n yperiod (float): For the 'Periodic' metric, the period to use in the y direction.\n (default: period)\n zperiod (float): For the 'Periodic' metric, the period to use in the z direction.\n (default: period)\n\n num_threads (int): How many OpenMP threads to use during the calculation.\n (default: use the number of cpu cores; this value can also be given in\n the constructor in the config dict.) Note that this won't work if the\n system's C compiler cannot use OptnMP (e.g. 
clang prior to version 3.7.)\n \"\"\"\n _valid_params = {\n 'nbins' : (int, False, None, None,\n 'The number of output bins to use for sep dimension.'),\n 'bin_size' : (float, False, None, None,\n 'The size of the output bins in log(sep).'),\n 'min_sep' : (float, False, None, None,\n 'The minimum separation to include in the output.'),\n 'max_sep' : (float, False, None, None,\n 'The maximum separation to include in the output.'),\n 'sep_units' : (str, False, None, coord.AngleUnit.valid_names,\n 'The units to use for min_sep and max_sep. Also the units of the output distances'),\n 'bin_slop' : (float, False, None, None,\n 'The fraction of a bin width by which it is ok to let the pairs miss the correct bin.',\n 'The default is to use 1 if bin_size <= 0.1, or 0.1/bin_size if bin_size > 0.1.'),\n 'nubins' : (int, False, None, None,\n 'The number of output bins to use for u dimension.'),\n 'ubin_size' : (float, False, None, None,\n 'The size of the output bins in u.'),\n 'min_u' : (float, False, None, None,\n 'The minimum u to include in the output.'),\n 'max_u' : (float, False, None, None,\n 'The maximum u to include in the output.'),\n 'nvbins' : (int, False, None, None,\n 'The number of output bins to use for positive v values.'),\n 'vbin_size' : (float, False, None, None,\n 'The size of the output bins in v.'),\n 'min_v' : (float, False, None, None,\n 'The minimum |v| to include in the output.'),\n 'max_v' : (float, False, None, None,\n 'The maximum |v| to include in the output.'),\n 'brute' : (bool, False, False, [False, True],\n 'Whether to use brute-force algorithm'),\n 'verbose' : (int, False, 1, [0, 1, 2, 3],\n 'How verbose the code should be during processing. ',\n '0 = Errors Only, 1 = Warnings, 2 = Progress, 3 = Debugging'),\n 'log_file' : (str, False, None, None,\n 'If desired, an output file for the logging output.',\n 'The default is to write the output to stdout.'),\n 'output_dots' : (bool, False, None, None,\n 'Whether to output dots to the stdout during the C++-level computation.',\n 'The default is True if verbose >= 2 and there is no log_file. Else False.'),\n 'split_method' : (str, False, 'mean', ['mean', 'median', 'middle', 'random'],\n 'Which method to use for splitting cells.'),\n 'min_top' : (int, False, 3, None,\n 'The minimum number of top layers to use when setting up the field.'),\n 'max_top' : (int, False, 10, None,\n 'The maximum number of top layers to use when setting up the field.'),\n 'precision' : (int, False, 4, None,\n 'The number of digits after the decimal in the output.'),\n 'num_threads' : (int, False, None, None,\n 'How many threads should be used. 
num_threads <= 0 means auto based on num cores.'),\n 'metric': (str, False, 'Euclidean', ['Euclidean', 'Arc', 'Periodic'],\n 'Which metric to use for the distance measurements'),\n 'bin_type': (str, False, 'LogRUV', ['LogRUV'],\n 'Which type of binning should be used'),\n 'min_rpar': (float, False, None, None,\n 'The minimum difference in Rparallel for pairs to include'),\n 'max_rpar': (float, False, None, None,\n 'The maximum difference in Rparallel for pairs to include'),\n 'period': (float, False, None, None,\n 'The period to use for all directions for the Periodic metric'),\n 'xperiod': (float, False, None, None,\n 'The period to use for the x direction for the Periodic metric'),\n 'yperiod': (float, False, None, None,\n 'The period to use for the y direction for the Periodic metric'),\n 'zperiod': (float, False, None, None,\n 'The period to use for the z direction for the Periodic metric'),\n }\n\n def __init__(self, config=None, logger=None, **kwargs):\n self.config = treecorr.config.merge_config(config,kwargs,BinnedCorr3._valid_params)\n if logger is None:\n self.logger = treecorr.config.setup_logger(\n treecorr.config.get(self.config,'verbose',int,1),\n self.config.get('log_file',None))\n else:\n self.logger = logger\n\n if 'output_dots' in self.config:\n self.output_dots = treecorr.config.get(self.config,'output_dots',bool)\n else:\n self.output_dots = treecorr.config.get(self.config,'verbose',int,1) >= 2\n\n self.bin_type = self.config.get('bin_type', None)\n self._bintype = treecorr._lib.Log\n\n self.sep_units = self.config.get('sep_units','')\n self._sep_units = treecorr.config.get(self.config,'sep_units',str,'radians')\n self._log_sep_units = math.log(self._sep_units)\n if 'nbins' not in self.config:\n if 'max_sep' not in self.config:\n raise TypeError(\"Missing required parameter max_sep\")\n if 'min_sep' not in self.config:\n raise TypeError(\"Missing required parameter min_sep\")\n if 'bin_size' not in self.config:\n raise TypeError(\"Missing required parameter bin_size\")\n self.min_sep = float(self.config['min_sep'])\n self.max_sep = float(self.config['max_sep'])\n if self.min_sep >= self.max_sep:\n raise ValueError(\"max_sep must be larger than min_sep\")\n bin_size = float(self.config['bin_size'])\n self.nbins = int(math.ceil(math.log(self.max_sep/self.min_sep)/bin_size))\n # Update self.bin_size given this value of nbins\n self.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins\n # Note in this case, bin_size is saved as the nominal bin_size from the config\n # file, and self.bin_size is the one for the radial bins. 
We'll use the nominal\n # bin_size as the default bin_size for u and v below.\n elif 'bin_size' not in self.config:\n if 'max_sep' not in self.config:\n raise TypeError(\"Missing required parameter max_sep\")\n if 'min_sep' not in self.config:\n raise TypeError(\"Missing required parameter min_sep\")\n self.min_sep = float(self.config['min_sep'])\n self.max_sep = float(self.config['max_sep'])\n if self.min_sep >= self.max_sep:\n raise ValueError(\"max_sep must be larger than min_sep\")\n self.nbins = int(self.config['nbins'])\n bin_size = self.bin_size = math.log(self.max_sep/self.min_sep)/self.nbins\n elif 'max_sep' not in self.config:\n if 'min_sep' not in self.config:\n raise TypeError(\"Missing required parameter min_sep\")\n self.min_sep = float(self.config['min_sep'])\n self.nbins = int(self.config['nbins'])\n bin_size = self.bin_size = float(self.config['bin_size'])\n self.max_sep = math.exp(self.nbins*bin_size)*self.min_sep\n else:\n if 'min_sep' in self.config:\n raise TypeError(\"Only 3 of min_sep, max_sep, bin_size, nbins are allowed.\")\n self.max_sep = float(self.config['max_sep'])\n self.nbins = int(self.config['nbins'])\n bin_size = self.bin_size = float(self.config['bin_size'])\n self.min_sep = self.max_sep*math.exp(-self.nbins*bin_size)\n if self.sep_units == '':\n self.logger.info(\"r: nbins = %d, min,max sep = %g..%g, bin_size = %g\",\n self.nbins,self.min_sep,self.max_sep,self.bin_size)\n else:\n self.logger.info(\"r: nbins = %d, min,max sep = %g..%g %s, bin_size = %g\",\n self.nbins,self.min_sep/self._sep_units,self.max_sep/self._sep_units,\n self.sep_units,self.bin_size)\n # The underscore-prefixed names are in natural units (radians for angles)\n self._min_sep = self.min_sep * self._sep_units\n self._max_sep = self.max_sep * self._sep_units\n self._bin_size = self.bin_size # There is not Linear, but if I add it, need to apply\n # units to _bin_size in that case as well.\n\n self.min_u = float(self.config.get('min_u', 0.))\n self.max_u = float(self.config.get('max_u', 1.))\n if self.min_u >= self.max_u:\n raise ValueError(\"max_u must be larger than min_u\")\n if self.min_u < 0. 
or self.max_u > 1.:\n raise ValueError(\"Invalid range for u: %f - %f\"%(self.min_u, self.max_u))\n self.ubin_size = float(self.config.get('ubin_size', bin_size))\n if 'nubins' not in self.config:\n self.nubins = int(math.ceil((self.max_u-self.min_u-1.e-10)/self.ubin_size))\n elif 'max_u' in self.config and 'min_u' in self.config and 'ubin_size' in self.config:\n raise TypeError(\"Only 3 of min_u, max_u, ubin_size, nubins are allowed.\")\n else:\n self.nubins = self.config['nubins']\n # Allow min or max u to be implicit from nubins and ubin_size\n if 'ubin_size' in self.config:\n if 'min_u' not in self.config:\n self.min_u = max(self.max_u - self.nubins * self.ubin_size, 0.)\n if 'max_u' not in self.config:\n self.max_u = min(self.min_u + self.nubins * self.ubin_size, 1.)\n # Adjust ubin_size given the other values\n self.ubin_size = (self.max_u-self.min_u)/self.nubins\n self.logger.info(\"u: nbins = %d, min,max = %g..%g, bin_size = %g\",\n self.nubins,self.min_u,self.max_u,self.ubin_size)\n\n self.min_v = float(self.config.get('min_v', 0.))\n self.max_v = float(self.config.get('max_v', 1.))\n if self.min_v >= self.max_v:\n raise ValueError(\"max_v must be larger than min_v\")\n if self.min_v < 0 or self.max_v > 1.:\n raise ValueError(\"Invalid range for |v|: %f - %f\"%(self.min_v, self.max_v))\n self.vbin_size = float(self.config.get('vbin_size', bin_size))\n if 'nvbins' not in self.config:\n self.nvbins = int(math.ceil((self.max_v-self.min_v-1.e-10)/self.vbin_size))\n elif 'max_v' in self.config and 'min_v' in self.config and 'vbin_size' in self.config:\n raise TypeError(\"Only 3 of min_v, max_v, vbin_size, nvbins are allowed.\")\n else:\n self.nvbins = self.config['nvbins']\n # Allow min or max v to be implicit from nvbins and vbin_size\n if 'vbin_size' in self.config:\n if 'max_v' not in self.config:\n self.max_v = min(self.min_v + self.nvbins * self.vbin_size, 1.)\n else: # min_v not in config\n self.min_v = max(self.max_v - self.nvbins * self.vbin_size, -1.)\n # Adjust vbin_size given the other values\n self.vbin_size = (self.max_v-self.min_v)/self.nvbins\n self.logger.info(\"v: nbins = %d, min,max = %g..%g, bin_size = %g\",\n self.nvbins,self.min_v,self.max_v,self.vbin_size)\n\n self.split_method = self.config.get('split_method','mean')\n self.logger.debug(\"Using split_method = %s\",self.split_method)\n\n self.min_top = treecorr.config.get(self.config,'min_top',int,3)\n self.max_top = treecorr.config.get(self.config,'max_top',int,10)\n\n self.bin_slop = treecorr.config.get(self.config,'bin_slop',float,-1.0)\n if self.bin_slop < 0.0:\n if self.bin_size <= 0.1:\n self.bin_slop = 1.0\n self.b = self.bin_size\n else:\n self.bin_slop = 0.1/self.bin_size # The stored bin_slop corresponds to lnr bins.\n self.b = 0.1\n if self.ubin_size <= 0.1:\n self.bu = self.ubin_size\n else:\n self.bu = 0.1\n if self.vbin_size <= 0.1:\n self.bv = self.vbin_size\n else:\n self.bv = 0.1\n else:\n self.b = self.bin_size * self.bin_slop\n self.bu = self.ubin_size * self.bin_slop\n self.bv = self.vbin_size * self.bin_slop\n\n if self.b > 0.100001: # Add some numerical slop\n self.logger.warning(\n \"Using bin_slop = %g, bin_size = %g\\n\"%(self.bin_slop,self.bin_size)+\n \"The b parameter is bin_slop * bin_size = %g\"%(self.b)+\n \" bu = %g, bv = %g\\n\"%(self.bu,self.bv)+\n \"It is generally recommended to use b <= 0.1 for most applications.\\n\"+\n \"Larger values of this b parameter may result in significant inaccuracies.\")\n else:\n self.logger.debug(\"Using bin_slop = %g, b = %g, bu = %g, bv = %g\",\n 
self.bin_slop,self.b,self.bu,self.bv)\n\n # This makes nbins evenly spaced entries in log(r) starting with 0 with step bin_size\n self.logr1d = np.linspace(start=0, stop=self.nbins*self.bin_size,\n num=self.nbins, endpoint=False)\n # Offset by the position of the center of the first bin.\n self.logr1d += math.log(self.min_sep) + 0.5*self.bin_size\n\n self.u1d = np.linspace(start=0, stop=self.nubins*self.ubin_size,\n num=self.nubins, endpoint=False)\n self.u1d += self.min_u + 0.5*self.ubin_size\n\n self.v1d = np.linspace(start=0, stop=self.nvbins*self.vbin_size,\n num=self.nvbins, endpoint=False)\n self.v1d += self.min_v + 0.5*self.vbin_size\n self.v1d = np.concatenate([-self.v1d[::-1],self.v1d])\n\n shape = (self.nbins, self.nubins, 2*self.nvbins)\n self.logr = np.tile(self.logr1d[:, np.newaxis, np.newaxis],\n (1, self.nubins, 2*self.nvbins))\n self.u = np.tile(self.u1d[np.newaxis, :, np.newaxis],\n (self.nbins, 1, 2*self.nvbins))\n self.v = np.tile(self.v1d[np.newaxis, np.newaxis, :],\n (self.nbins, self.nubins, 1))\n self.rnom = np.exp(self.logr)\n self.rnom1d = np.exp(self.logr1d)\n self.brute = treecorr.config.get(self.config,'brute',bool,False)\n if self.brute:\n self.logger.info(\"Doing brute force calculation.\",)\n self.coords = None\n self.metric = None\n self.min_rpar = treecorr.config.get(self.config,'min_rpar',float,-sys.float_info.max)\n self.max_rpar = treecorr.config.get(self.config,'max_rpar',float,sys.float_info.max)\n period = treecorr.config.get(self.config,'period',float,0)\n self.xperiod = treecorr.config.get(self.config,'xperiod',float,period)\n self.yperiod = treecorr.config.get(self.config,'yperiod',float,period)\n self.zperiod = treecorr.config.get(self.config,'zperiod',float,period)\n\n def _process_all_auto(self, cat1, metric, num_threads):\n # I'm not sure which of these is more intuitive, but both are correct...\n if True:\n for c1 in cat1:\n self.process_auto(c1, metric, num_threads)\n for c2 in cat1:\n if c2 is not c1:\n self.process_cross(c1,c1,c2, metric, num_threads)\n self.process_cross(c1,c2,c1, metric, num_threads)\n self.process_cross(c2,c1,c1, metric, num_threads)\n for c3 in cat1:\n if c3 is not c1 and c3 is not c2:\n self.process_cross(c1,c2,c3, metric, num_threads)\n else: # pragma: no cover\n for i,c1 in enumerate(cat1):\n self.process_auto(c1)\n for j,c2 in enumerate(cat1[i+1:]):\n self.process_cross(c1,c1,c2, metric, num_threads)\n self.process_cross(c1,c2,c1, metric, num_threads)\n self.process_cross(c2,c1,c1, metric, num_threads)\n self.process_cross(c1,c2,c2, metric, num_threads)\n self.process_cross(c2,c1,c2, metric, num_threads)\n self.process_cross(c2,c2,c1, metric, num_threads)\n for c3 in cat1[i+j+1:]:\n self.process_cross(c1,c2,c3, metric, num_threads)\n self.process_cross(c1,c3,c2, metric, num_threads)\n self.process_cross(c2,c1,c3, metric, num_threads)\n self.process_cross(c2,c3,c1, metric, num_threads)\n self.process_cross(c3,c1,c2, metric, num_threads)\n self.process_cross(c3,c2,c1, metric, num_threads)\n\n # These are not actually implemented yet.\n def _process_all_cross21(self, cat1, cat2, metric, num_threads): # pragma: no cover\n for c1 in cat1:\n for c2 in cat2:\n self.process_cross(c1,c1,c2, metric, num_threads)\n for c3 in cat1:\n if c3 is not c1:\n self.process_cross(c1,c3,c2, metric, num_threads)\n self.process_cross(c3,c1,c2, metric, num_threads)\n\n def _process_all_cross(self, cat1, cat2, cat3, metric, num_threads):\n for c1 in cat1:\n for c2 in cat2:\n for c3 in cat3:\n self.process_cross(c1,c2,c3, metric, 
num_threads)\n\n def _set_num_threads(self, num_threads):\n if num_threads is None:\n num_threads = self.config.get('num_threads',None)\n if num_threads is None:\n self.logger.debug('Set num_threads automatically from ncpu')\n else:\n self.logger.debug('Set num_threads = %d',num_threads)\n treecorr.set_omp_threads(num_threads, self.logger)\n\n def _set_metric(self, metric, coords1, coords2=None, coords3=None):\n if metric is None:\n metric = treecorr.config.get(self.config,'metric',str,'Euclidean')\n coords, metric = treecorr.util.parse_metric(metric, coords1, coords2, coords3)\n if self.coords != None or self.metric != None:\n if coords != self.coords:\n self.logger.warning(\"Detected a change in catalog coordinate systems. \"+\n \"This probably doesn't make sense!\")\n if metric != self.metric:\n self.logger.warning(\"Detected a change in metric. \"+\n \"This probably doesn't make sense!\")\n if metric == 'Periodic':\n if self.xperiod == 0 or self.yperiod == 0 or (coords=='3d' and self.zperiod == 0):\n raise ValueError(\"Periodic metric requires setting the period to use.\")\n else:\n if self.xperiod != 0 or self.yperiod != 0 or self.zperiod != 0:\n raise ValueError(\"period options are not valid for %s metric.\"%metric)\n self.coords = coords\n self.metric = metric\n self._coords = treecorr.util.coord_enum(coords)\n self._metric = treecorr.util.metric_enum(metric)\n\n def _apply_units(self, mask):\n if self.coords == 'spherical' and self.metric == 'Euclidean':\n # Then our distances are all angles. Convert from the chord distance to a real angle.\n # L = 2 sin(theta/2)\n self.meand1[mask] = 2. * np.arcsin(self.meand1[mask]/2.)\n self.meanlogd1[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd1[mask])/2.))\n self.meand2[mask] = 2. * np.arcsin(self.meand2[mask]/2.)\n self.meanlogd2[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd2[mask])/2.))\n self.meand3[mask] = 2. * np.arcsin(self.meand3[mask]/2.)\n self.meanlogd3[mask] = np.log(2.*np.arcsin(np.exp(self.meanlogd3[mask])/2.))\n\n self.meand1[mask] /= self._sep_units\n self.meanlogd1[mask] -= self._log_sep_units\n self.meand2[mask] /= self._sep_units\n self.meanlogd2[mask] -= self._log_sep_units\n self.meand3[mask] /= self._sep_units\n self.meanlogd3[mask] -= self._log_sep_units\n\n def _get_minmax_size(self):\n if self.metric == 'Euclidean':\n # The minimum separation we care about is that of the smallest size, which is\n # min_sep * min_u. Do the same calculation as for 2pt to get to min_size.\n b1 = min(self.b, self.bu, self.bv)\n min_size = self._min_sep * self.min_u * b1 / (2.+3.*b1)\n\n # This time, the maximum size is d1 * b. d1 can be as high as 2*max_sep.\n b2 = max(self.b, self.bu, self.bv)\n max_size = 2. * self._max_sep * b2\n return min_size, max_size\n else:\n return 0., 0.\n\n" ]
[ [ "numpy.tile", "numpy.arcsin", "numpy.exp", "numpy.concatenate", "numpy.linspace" ] ]
jmrozanec/features-generator
[ "0772394cf1c4a56a88c8a8faba2e5c84b4b2883f" ]
[ "features/feature_generator.py" ]
[ "from sklearn.base import BaseEstimator, TransformerMixin\nfrom feature_generation_strategy import MinFeatureGenerationStrategy, MaxFeatureGenerationStrategy, SumFeatureGenerationStrategy, DiffFeatureGenerationStrategy, ProdFeatureGenerationStrategy, DivFeatureGenerationStrategy, AvgFeatureGenerationStrategy, PCAFeatureGenerationStrategy, TSVDFeatureGenerationStrategy, ICAFeatureGenerationStrategy, GRPFeatureGenerationStrategy, SRPFeatureGenerationStrategy\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\n\nclass FeatureGenerator(BaseEstimator, TransformerMixin):\n \"\"\"\n Feature generator: enables to create features using provided strategies.\n \"\"\"\n def __init__(self, key, strategies):\n self.key = key\n self.strategies = strategies\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return X[self.key]\n\n\n\n\n\n\n\niris = load_iris()\ndf = pd.DataFrame(data= np.c_[iris['data'], iris['target']], columns= iris['feature_names'] + ['target'])\ntrain, X_valtest, y_train, y_valtest = train_test_split(df[df.columns.difference(['target'])], df['target'], test_size=0.3)\nval, test, y_val, y_test = train_test_split(X_valtest, y_valtest, test_size=0.5)\n\ncolumn_names = train.columns.values\nminstrat = MinFeatureGenerationStrategy()\nmaxstrat = MaxFeatureGenerationStrategy()\nsumstrat = SumFeatureGenerationStrategy()\ndiffstrat = DiffFeatureGenerationStrategy()\nprodstrat = ProdFeatureGenerationStrategy()\ndivstrat = DivFeatureGenerationStrategy()\navgstrat = AvgFeatureGenerationStrategy()\npcastrat = PCAFeatureGenerationStrategy()\ntsvdstrat = TSVDFeatureGenerationStrategy()\nicastrat = ICAFeatureGenerationStrategy()\ngrpstrat = GRPFeatureGenerationStrategy()\nsrpstrat = SRPFeatureGenerationStrategy()\n\nstrategies1 = [minstrat, maxstrat, sumstrat, diffstrat, prodstrat, divstrat, avgstrat]\nstrategies2 = [pcastrat, tsvdstrat, icastrat, grpstrat, srpstrat]\n\ndef generate_features(train, val, test, colname1, colname2, strategies):\n\tfor strategy in strategies:\n\t\ttrain, val, test = strategy.generate(train, val, test, colname1, colname2)\n\treturn (train, val, test)\n\nfor colname1 in column_names:\n\tfor colname2 in column_names:\n\t\ttrain, val, test = generate_features(train, val, test, colname1, colname2, strategies1)\n\ntrain.fillna(0, inplace=True)\nval.fillna(0, inplace=True)\ntest.fillna(0, inplace=True)\n\nfor strategy in strategies2:\n\ttrain, val, test = strategy.generate(train, val, test, 10)\n\nnew_column_names = train.columns.values\nprint(\"original columns: {}\".format(\",\".join(column_names)))\nprint(\"new columns: {}\".format(\",\".join(new_column_names)))\n" ]
[ [ "pandas.DataFrame", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_iris" ] ]
FOLEFAC/InstantReact
[ "ba0e00ef4c84710accfe6b278000da06ee2d6438" ]
[ "test.py" ]
[ "\r\n# Imports\r\n\r\nimport torch\r\nfrom torchvision import datasets, models, transforms # All torchvision modules\r\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, Loss functions,..\r\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam,...\r\nimport torch.nn.functional as F # All functions that don't have any parameters\r\nfrom torch.utils.data import (DataLoader,Dataset) # Gives easier dataset managment and creates mini batches\r\nimport torchvision.datasets as datasets # Has standard datasets we can import in a nice way\r\nimport torchvision.transforms as transforms # Transformations we can perform on our dataset\r\nimport torchtext # Makes it easy to work with sequence data \r\nfrom torchtext.data import get_tokenizer\r\n\r\nimport re # regex library\r\nimport os # Doing operating system operations\r\nimport cv2 # Computer vision tasks with OpenCV\r\nimport numpy as np # Powerful arrray computation library\r\nfrom PIL import Image # WOrking with image files\r\nimport pandas # Extracting data from csv\r\nimport math # Math package\r\nimport pickle # Saving variables for later usage.\r\n\r\nimport argparse\r\nfrom torchsummary import summary # Make understanding of models easier\r\nimport torch # PyTorch library\r\nfrom time import time # Using timer in code\r\n\r\n\r\nfrom utils import Utils\r\nfrom text_processor import TextProcessor\r\nfrom dataset import CustomDataset\r\nfrom models import Encoder_LSTM, Decoder_LSTM, Seq2Seq\r\n\r\nimport models as md\r\n\r\n# Set device\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # Use Cuda if GPU available!\r\n\r\n\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--NUMBER_OF_FRAMES', type=int, default=40) \r\nparser.add_argument('--LEARNING_RATE', type=float, default=1e-3)\r\nparser.add_argument('--BATCH_SIZE', type=int, default=1) \r\nparser.add_argument('--EPOCH', type=int, default=10) \r\nparser.add_argument('--TRAINING_DEVICE', type=str, default='cuda') \r\nparser.add_argument('--VOCAB_SIZE', type=int, default=200) \r\nparser.add_argument('--NUMBER_OF_WORDS', type=int, default=10) \r\nparser.add_argument('--HIDDEN_SIZE', type=int, default=300) \r\nparser.add_argument('--INPUT_SIZE', type=int, default=4096) \r\nparser.add_argument('--NUMBER_OF_LAYERS', type=int, default=1) \r\nparser.add_argument('--video_file', type=str)\r\nparser.add_argument('--train_corpus', type=str)\r\nparser.add_argument('--load_weights', type=str)\r\n\r\nFLAGS = parser.parse_args()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main(argv = None):\r\n\r\n \"\"\"\r\n Training.\r\n \"\"\"\r\n\r\n ### parametres\r\n\r\n LEARNING_RATE = FLAGS.LEARNING_RATE\r\n NUMBER_OF_FRAMES = FLAGS.NUMBER_OF_FRAMES\r\n BATCH_SIZE = FLAGS.BATCH_SIZE\r\n EPOCH = FLAGS.EPOCH\r\n TRAINING_DEVICE = FLAGS.TRAINING_DEVICE\r\n VOCAB_SIZE = FLAGS.VOCAB_SIZE\r\n NUMBER_OF_WORDS = FLAGS.NUMBER_OF_WORDS\r\n HIDDEN_SIZE = FLAGS.HIDDEN_SIZE\r\n INPUT_SIZE = FLAGS.INPUT_SIZE\r\n NUMBER_OF_LAYERS = FLAGS.NUMBER_OF_LAYERS\r\n tsfm = transforms.Compose([\r\n transforms.Resize([224, 224]),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\r\n ])\r\n train_corpus = FLAGS.train_corpus\r\n utils = Utils()\r\n all_text = utils.output_text(train_corpus)\r\n text_processor = TextProcessor(freq_threshold = 10)\r\n dictionary = text_processor.vocab_creator(all_text)\r\n\r\n\r\n\r\n ### Model definition\r\n encoder = Encoder_LSTM(input_size = INPUT_SIZE, hidden_size = 
HIDDEN_SIZE , num_layers = NUMBER_OF_LAYERS)\r\n decoder = Decoder_LSTM(input_size = VOCAB_SIZE, hidden_size = HIDDEN_SIZE , num_layers = NUMBER_OF_LAYERS,number_of_words = NUMBER_OF_WORDS)\r\n model_seq_to_seq = Seq2Seq(encoder, decoder).to(device)\r\n model = model_seq_to_seq\r\n\r\n\r\n ### load the state_dict of model if model has been pretrained.\r\n model.load_state_dict(torch.load(FLAGS.load_weights))\r\n\r\n\r\n\r\n\r\n #### Model Testing\r\n model.eval();\r\n from random import randint\r\n import matplotlib.pyplot as plt\r\n\r\n utils = Utils()\r\n\r\n video_path = FLAGS.video_file\r\n\r\n video_pre_data = utils.video_to_frames(video_path,frame_number = NUMBER_OF_FRAMES, device = 'cuda', INPUT_SIZE = INPUT_SIZE , model = md.model_vgg, transform = tsfm)\r\n \r\n X_2 = torch.zeros([NUMBER_OF_WORDS,VOCAB_SIZE])\r\n\r\n for i in range(NUMBER_OF_WORDS):\r\n if (i == 0):\r\n \r\n X_2[i][2] = 1\r\n else:\r\n X_2[i][1] = 1\r\n\r\n input_data = video_pre_data.unsqueeze(0)\r\n\r\n final_sentence = []\r\n\r\n X_2 = X_2.unsqueeze(0)\r\n X_2 = X_2.to(device)\r\n input_data = input_data.to(device)\r\n\r\n\r\n\r\n\r\n for i in range(NUMBER_OF_WORDS-1):\r\n with torch.no_grad():\r\n predicted = model(input_data, X_2)\r\n predicted = predicted.squeeze(0)\r\n\r\n final_sentence.append(next((key for key, value in dictionary.items() if value == torch.argmax(predicted[i])), None))\r\n X_2[0][i+1][torch.argmax(predicted[i])] = 1\r\n X_2[0][i+1][1] = 0\r\n print(final_sentence)\r\n\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n main()" ]
[ [ "torch.load", "torch.argmax", "torch.no_grad", "torch.cuda.is_available", "torch.zeros" ] ]
opengulf/nyc-directories-support-scripts
[ "e22582b8f4cb3c365e9aac1d860d9c36831277a5" ]
[ "py-hocr-detect-columns.py" ]
[ "import numpy as np\nfrom bs4 import BeautifulSoup\nfrom sklearn.cluster import KMeans\nfrom math import sqrt\nfrom statistics import mean\nfrom PIL import Image, ImageOps, ImageDraw\nimport re\nimport json\nimport argparse\nimport os\nimport uuid\nfrom cdparser import Classifier, Features, LabeledEntry, Utils\nimport sys\n\n\ndef build_manifest(main_path, entries_json):\n directory_uuid = entries_json[0]['directory_uuid']\n page_uuid = entries_json[0]['page_uuid']\n num_columns = max([int(entries_json[i]['col']) for i in entries_json])\n num_skipped_lines = len(\n [i for i in entries_json if entries_json[i]['skipped_line_after'] == '1'])\n if not os.path.exists(os.path.join(main_path, 'manifest')):\n os.makedirs(os.path.join(main_path, 'manifest'))\n manifest_path = os.path.join(\n main_path, 'manifest', directory_uuid + '_manifest.txt')\n with open(manifest_path, 'a') as f:\n f.write('directory_uuid:' + directory_uuid + '\\n')\n f.write('page_uuid:' + page_uuid + '\\n')\n f.write('number_extracted_entries:' + str(len(entries_json)) + '\\n')\n f.write('total_number_lines_hocr:' +\n entries_json[0]['total_lines_from_hocr'] + '\\n')\n f.write('number_extracted_columns:' + str(num_columns) + '\\n')\n f.write('number_skipped_lines:' + str(num_skipped_lines))\n f.write('\\n\\n')\n f.close()\n\n\ndef make_tsv(filepath, type):\n with open(filepath, 'w+') as f:\n f.write('\\t'.join(['directory_uuid', 'page_uuid',\n 'entry_uuid', type + '_count', 'offset_count', 'token']))\n f.write('\\n')\n f.close()\n\n\ndef build_entries_tsv(entries_json, dir_tsv, directory_uuid):\n if not os.path.exists(os.path.join(dir_tsv, directory_uuid + '_subjects.tsv')):\n make_tsv(os.path.join(dir_tsv, directory_uuid +\n '_subjects.tsv'), 'subject')\n with open(os.path.join(dir_tsv, directory_uuid + '_subjects.tsv'), 'a') as f:\n for rec in entries_json:\n subject_count = 0\n for subject in entries_json[rec]['labeled_entry']['subjects']:\n offset_count = 0\n for sub_token in subject.split():\n f.write(entries_json[rec]['directory_uuid'] + '\\t'\n + entries_json[rec]['page_uuid'] + '\\t'\n + entries_json[rec]['entry_uuid'] + '\\t')\n f.write(str(subject_count) + '\\t')\n f.write(str(offset_count) + '\\t')\n f.write(sub_token + '\\n')\n offset_count += 1\n subject_count += 1\n f.close()\n if not os.path.exists(os.path.join(dir_tsv, directory_uuid + '_occupations.tsv')):\n make_tsv(os.path.join(dir_tsv, directory_uuid +\n '_occupations.tsv'), 'occupation')\n with open(os.path.join(dir_tsv, directory_uuid + '_occupations.tsv'), 'a') as f:\n for rec in entries_json:\n occupation_count = 0\n for occupation in entries_json[rec]['labeled_entry']['occupations']:\n offset_count = 0\n for occ_token in occupation.split():\n f.write(entries_json[rec]['directory_uuid'] + '\\t'\n + entries_json[rec]['page_uuid'] + '\\t'\n + entries_json[rec]['entry_uuid'] + '\\t')\n f.write(str(occupation_count) + '\\t')\n f.write(str(offset_count) + '\\t')\n f.write(occ_token + '\\n')\n offset_count += 1\n occupation_count += 1\n f.close()\n if not os.path.exists(os.path.join(dir_tsv, directory_uuid + '_locations.tsv')):\n make_tsv(os.path.join(dir_tsv, directory_uuid +\n '_locations.tsv'), 'location')\n with open(os.path.join(dir_tsv, directory_uuid + '_locations.tsv'), 'a') as f:\n for rec in entries_json:\n location_count = 0\n for location in entries_json[rec]['labeled_entry']['locations']:\n offset_count = 0\n for loc_token in location['value'].split():\n f.write(entries_json[rec]['directory_uuid'] + '\\t'\n + entries_json[rec]['page_uuid'] + 
'\\t'\n + entries_json[rec]['entry_uuid'] + '\\t')\n f.write(str(location_count) + '\\t')\n f.write(str(offset_count) + '\\t')\n f.write(loc_token + '\\n')\n offset_count += 1\n location_count += 1\n f.close()\n\n\ndef imagebuilder(r, col_locations, image_filename, std1, gap_locations, page_uuid, output_directory):\n\n # It would appear that the incoming cropped jpegs are grayscale, necessitating a conversion to RGB to move forward\n # For the overlay, we use RGBA to enable opacity settings.\n\n pageimg = Image.open(image_filename).convert('RGB')\n overlay = ImageDraw.Draw(pageimg, 'RGBA')\n\n color = {0: (255, 0, 0, 90), 1: (0, 0, 255, 90), 2: (0, 255, 0, 90)}\n\n # Draw the hocr boxes\n for i in range(len(r)):\n overlay.polygon([(r[i:i+1, 1], r[i:i+1, 4]),\n (r[i:i+1, 1], r[i:i+1, 2]),\n (r[i:i+1, 3], r[i:i+1, 2]),\n (r[i:i+1, 3], r[i:i+1, 4])],\n fill=color[int(r[i:i+1, 5])], outline=color[int(r[i:i+1, 5])])\n\n # Draw the column bounds\n\n for loc in col_locations:\n\n overlay.polygon([(loc - std1, 0), (loc + std1, 0),\n (loc + std1, pageimg.size[1]),\n (loc - std1, pageimg.size[1])],\n fill=(255, 1, 255, 100))\n overlay.line([(loc, 0), (loc, pageimg.size[1])],\n fill=(0, 0, 0, 127), width=4)\n\n # Draw the gap lines\n\n for gap_location in gap_locations:\n overlay.line([(0, gap_location), (pageimg.size[0], gap_location)],\n fill=(0, 0, 0, 127), width=4)\n pageimg.save(os.path.join(output_directory, page_uuid + '.jpeg'), 'JPEG')\n\n\ndef json_from_hocr(line_array, page_html, page_uuid, directory_uuid):\n hocr_entries = {}\n entries_json = {}\n total_page_line_count = 0\n for line in page_html.html.body.div.find_all('span'):\n if line['class'][0] == 'ocr_line':\n total_page_line_count += 1\n id_num = int(line['id'].split('_')[2])\n words = ' '.join([word.string.replace('\\n', '').strip()\n for word in line.children])\n hocr_entries[id_num] = normalize_entry(words)\n entry_id = 0\n for keep_line in line_array:\n entry_uuid = uuid.uuid1()\n if keep_line[5] == 0:\n entries_json[entry_id] = {\n 'directory_uuid': directory_uuid,\n 'page_uuid': page_uuid,\n 'entry_uuid': entry_uuid.hex,\n 'total_lines_from_hocr': str(total_page_line_count),\n 'original_hocr_line_number': str(keep_line[0]),\n 'bbox': ' '.join([str(val) for val in keep_line[1:5]]),\n 'col': str(keep_line[6]),\n 'appended': 'no',\n 'skipped_line_after': str(keep_line[7]),\n 'complete_entry': hocr_entries[keep_line[0]]\n }\n entry_id += 1\n\n # Indents\n else:\n try:\n if entries_json[entry_id - 1]['skipped_line_after'] != \"1\":\n if entries_json[entry_id - 1]['complete_entry'][-1] == '-':\n entries_json[entry_id -\n 1]['complete_entry'] += hocr_entries[keep_line[0]]\n else:\n entries_json[entry_id - 1]['complete_entry'] += ' ' + \\\n hocr_entries[keep_line[0]]\n entries_json[entry_id - 1]['appended'] = 'yes'\n else:\n entries_json[entry_id] = {\n 'directory_uuid': directory_uuid,\n 'page_uuid': page_uuid,\n 'entry_uuid': entry_uuid.hex,\n 'total_lines_from_hocr': str(total_page_line_count),\n 'original_hocr_line_number': str(keep_line[0]),\n 'bbox': ' '.join([str(val) for val in keep_line[1:5]]),\n 'col': str(keep_line[6]),\n 'appended': 'no',\n 'skipped_line_after': str(keep_line[7]),\n 'complete_entry': hocr_entries[keep_line[0]]\n }\n entry_id += 1\n except:\n\n # Cases where an indent is the first line in page array and there is no preceding entry\n\n entries_json[entry_id] = {\n 'directory_uuid': directory_uuid,\n 'page_uuid': page_uuid,\n 'entry_uuid': entry_uuid.hex,\n 'total_lines_from_hocr': 
str(total_page_line_count),\n 'original_hocr_line_number': str(keep_line[0]),\n 'bbox': ' '.join([str(val) for val in keep_line[1:5]]),\n 'col': str(keep_line[6]),\n 'appended': 'no',\n 'skipped_line_after': str(keep_line[7]),\n 'complete_entry': hocr_entries[keep_line[0]]\n }\n entry_id += 1\n\n return entries_json\n\n\ndef load_hocr_lines(filepath):\n page_array = []\n rawhtml = BeautifulSoup(open(filepath, encoding='utf-8'), \"lxml\")\n for line in rawhtml.html.body.div.find_all('span'):\n line_list = []\n if line['class'][0] == 'ocr_line':\n line_list.append(int(line['id'].split('_')[2]))\n line_list += [int(i)\n for i in line['title'].split(';')[0].split(' ')[1:]]\n line_list += [0, 0, 0]\n page_array.append(line_list)\n return np.array(page_array), rawhtml\n\n\ndef normalize_entry(entry):\n replacements = [(\"‘\", \"'\"), (\"’\", \"'\"), (\" ay.\", \" av.\"),\n (\" ay,\", \" av,\"), (\"- \", \"-\"), (\" -\", \"-\"), (\"\\t\", ' ')]\n for swap in replacements:\n entry = entry.replace(swap[0], swap[1])\n return ' '.join(entry.split())\n\n\ndef remove_precede_space(entry):\n if re.search(r'\\s\\.|\\s\\,', entry):\n entry = entry.replace(' .', '.').replace(' ,', ',')\n return entry\n\n\ndef normalize_labeled_entry(labeled_entry_dict):\n new_subjects = []\n for subject in labeled_entry_dict['subjects']:\n new_subjects.append(remove_precede_space(subject))\n labeled_entry_dict['subjects'] = new_subjects\n new_occs = []\n for occ in labeled_entry_dict['occupations']:\n new_occs.append(remove_precede_space(occ))\n labeled_entry_dict['occupations'] = new_occs\n new_locations = []\n for loc_dict in labeled_entry_dict['locations']:\n new_loc_dict = {}\n new_loc_dict['value'] = remove_precede_space(loc_dict['value'])\n try:\n new_loc_dict['labels'] = loc_dict['labels']\n except:\n pass\n new_locations.append(new_loc_dict)\n labeled_entry_dict['locations'] = new_locations\n return labeled_entry_dict\n\n\ndef build_entries(args):\n \"\"\"\n Page Array Structure\n col 0 = ID number of line\n col 1-4 = bbbox x1, y1, x2, y2\n col 5 = 0:col line; 1: indent line; 2:kill line\n col 6 = 1:col 1; 2: col2 (column assignment)\n col 7 = 0:ok to append, 1: do not append (likely gap)\"\"\"\n\n root = '/'.join(args.path.split('/')[:-1])\n directory_uuid = root.split('/')[-1]\n hocr_files = [file for file in os.listdir(\n args.path) if file.endswith('.hocr')]\n\n print(\"Processing: \", directory_uuid)\n\n for hocr_file in hocr_files:\n\n try:\n page_uuid = hocr_file.replace('_rotated', '').replace(\n '_cropped', '').replace('.hocr', '')\n # try:\n raw_hocr_array, page_html = load_hocr_lines(\n os.path.join(args.path, hocr_file))\n\n # jpeg_path = os.path.join(\n # root, args.jpeg_directory, hocr_file.replace('.hocr', '.jpeg'))\n\n ##\n # Find our likely column locations\n ##\n\n if raw_hocr_array.size == 0:\n print(\"Empty HOCR file: \" + hocr_file)\n print(\"Passing: ----------------------------------\")\n continue\n\n kmeans = KMeans(n_clusters=8).fit(\n raw_hocr_array[:, 1].reshape(-1, 1))\n centroids = kmeans.cluster_centers_\n cands_cols = {}\n\n for j in range(len(centroids)):\n cands_cols[centroids[j, 0]] = 0\n std = sqrt(mean([((i - centroids[j, 0])**2)\n for i in raw_hocr_array[:, 1]]))\n for i in range(len(raw_hocr_array)):\n if abs(raw_hocr_array[i:i+1, 1] - centroids[j, 0]) > (std/16):\n pass\n else:\n cands_cols[centroids[j, 0]] += 1\n\n # We have our dict of possible column location along with the number of entries proximate to each\n # But in case our k-means clustering was compromised by a 
slanted column line (yielding double col locations)\n # we need to check for that and take an average of the resultant double col1 and double col2 locations\n\n halfway_page_rough = (max(raw_hocr_array[:, 1])/2)*.95\n lefthand_cands = [\n i for i in cands_cols.items() if i[0] < halfway_page_rough]\n righthand_cands = [\n i for i in cands_cols.items()if i[0] > halfway_page_rough]\n col1_xval_cands = sorted(\n lefthand_cands, key=lambda x: x[1], reverse=True)\n col2_xval_cands = sorted(\n righthand_cands, key=lambda x: x[1], reverse=True)\n\n # Now that we've split our candidate locations into what we think is roughly the col1 and col2 areas\n # we look for two closely proximate top candidates. If they exist, we average them out\n\n if abs(col1_xval_cands[0][0] - col1_xval_cands[1][0]) < 50:\n col1_xval = mean(\n [col1_xval_cands[0][0], col1_xval_cands[1][0]])\n else:\n col1_xval = col1_xval_cands[0][0]\n try:\n if abs(col2_xval_cands[0][0] - col2_xval_cands[1][0]) < 50:\n col2_xval = mean(\n [col2_xval_cands[0][0], col2_xval_cands[1][0]])\n else:\n col2_xval = col2_xval_cands[0][0]\n\n # For cases where we don't have multiple candidates for a second column,\n # possibly because Tesseract missed second column entirely:\n except:\n col2_xval = col2_xval_cands[0][0]\n\n ##\n # Pass to identify and keep lines that are at x-val of column edges\n ##\n\n std1 = sqrt(mean([((i - col1_xval)**2)\n for i in raw_hocr_array[:, 1]]))/16\n std2 = sqrt(mean([((i - col2_xval)**2)\n for i in raw_hocr_array[:, 1]]))/16\n for i in range(len(raw_hocr_array)):\n if abs(raw_hocr_array[i:i + 1, 1] - col1_xval) < std1 or abs(raw_hocr_array[i:i + 1, 1] - col2_xval) < std2:\n # Col 1 identification:\n if abs(raw_hocr_array[i:i + 1, 1] - col1_xval) < std1:\n raw_hocr_array[i:i + 1, 6] = 1\n # Col 2 identification:\n elif abs(raw_hocr_array[i:i + 1, 1] - col2_xval) < std2:\n raw_hocr_array[i:i + 1, 6] = 2\n # Id of indents and any potential chopped off lines to right of columns\n elif raw_hocr_array[i:i + 1, 1] - (col1_xval + std1) > 0 and raw_hocr_array[i:i + 1, 1] < col2_xval:\n # Col 1 indents identification\n raw_hocr_array[i:i + 1, 5] = 1\n raw_hocr_array[i:i + 1, 6] = 1\n elif raw_hocr_array[i:i + 1, 1] - (col2_xval + std2) > 0:\n # Col 2 indents identification\n raw_hocr_array[i:i + 1, 5] = 1\n raw_hocr_array[i:i + 1, 6] = 2\n # Eliminating anything to left of column 1\n if raw_hocr_array[i:i+1, 1] < (col1_xval - std1):\n raw_hocr_array[i:i + 1, 5] = 2\n # Eliminating anything to right of column 2 right edge\n if raw_hocr_array[i:i + 1, 1] > (col2_xval + (col2_xval - col1_xval)):\n raw_hocr_array[i:i + 1, 5] = 2\n\n ##\n # Pass to find lines flush with a column whose y vals make them unlikely to be in the page block for entries\n ##\n\n reduced_array = raw_hocr_array[raw_hocr_array[:, 5] != 2]\n sorted_y_array = np.sort(reduced_array.view(\n 'i8,i8,i8,i8,i8,i8,i8,i8'), order=['f2'], axis=0).view(np.int)\n\n # To find an appropriate vertical line density of all likely lines, we grab a sample at a point roughly 1/4\n # the way through our entries, then gather the line density at that point within a gap around that point\n # The gap is calculated at 0.5% of the highest yval (roughly 0.5% of the yval height of the page)\n\n quarter_page = len(sorted_y_array)//4\n gap = float(max(raw_hocr_array[:, 2]))*.05\n entry_density = len([i for i in sorted_y_array[:, 2] if abs(\n i - sorted_y_array[quarter_page:quarter_page+1, 2]) < gap/2])\n\n # We now examine the line density around every line in the page; if the density is 
low, we do a second check to make\n # sure the reason isn't that it is a first or last line; in those cases we check for gap density after/before the line\n # Anything that still fails we cut\n\n for i in range(len(sorted_y_array)):\n proximate_lines = [yval for yval in sorted_y_array[:, 2] if abs(\n yval - sorted_y_array[i:i+1, 2]) < gap/2]\n if len(proximate_lines) - 1 < entry_density/2:\n top_line_proximate_lines = [yval for yval in sorted_y_array[:, 2] if yval - sorted_y_array[i:i + 1, 2] < gap\n and yval - sorted_y_array[i:i + 1, 2] > 0]\n bottom_line_proximate_lines = [yval for yval in sorted_y_array[:, 2] if\n sorted_y_array[i:i + 1, 2] - yval < gap and sorted_y_array[i:i + 1, 2] - yval > 0]\n if len(top_line_proximate_lines) > entry_density or len(bottom_line_proximate_lines) > entry_density:\n pass\n else:\n sorted_y_array[i:i+1, 5] = 2\n\n ##\n # Pass to look for missing lines by finding no line with a yval that is 1.95% of the expected space\n # between lines; if a missing line is found, we mark the previous line to make sure an indent isn't appended to that line\n # Those indents following a gap will become a standalone line because their head-entry is missing\n ##\n\n line_only_array = sorted_y_array[sorted_y_array[:, 5] != 2]\n sorted_line_only_array = np.sort(line_only_array.view(\n 'i8,i8,i8,i8,i8,i8,i8,i8'), order=['f6', 'f2'], axis=0).view(np.int)\n sample_lines = sorted_line_only_array[sorted_line_only_array[:, 6] == 1]\n gaps = []\n\n for i in range(len(sample_lines)):\n try:\n gaps.append(\n int(sample_lines[i+1:i+2, 2] - sample_lines[i:i+1, 2]))\n except:\n pass\n\n average_line_gap = sum(gaps) // len(sample_lines)\n gap_locations = []\n for i in range(len(sorted_line_only_array)):\n try:\n if int(sorted_line_only_array[i + 1:i + 2, 2] - sorted_line_only_array[i:i + 1, 2]) > average_line_gap*1.95:\n sorted_line_only_array[i:i+1, 7] = 1\n gap_locations.append(\n sorted_line_only_array[i:i + 1, 2] + average_line_gap*1.5)\n except:\n pass\n\n # We can either build the image or return the json\n\n if args.make_image == 'True':\n imagebuilder(sorted_line_only_array, [\n col1_xval, col2_xval], jpeg_path, std1, gap_locations, page_uuid, os.path.join(root, args.bbox_location))\n entries_json = json_from_hocr(\n sorted_line_only_array, page_html, page_uuid, directory_uuid)\n build_manifest(root, entries_json)\n if args.mode == 'P':\n print(entries_json)\n outpath = os.path.join(\n args.path_out, os.path.splitext(hocr_file)[0] + \".json\")\n f = open(outpath, \"w\")\n f.write(json.dumps(entries_json))\n f.close()\n print(\"----------------------------------------\")\n else:\n classifier = Classifier.Classifier()\n classifier.load_training(args.crf_training_path)\n classifier.train()\n for rec in entries_json:\n entry = LabeledEntry.LabeledEntry(\n entries_json[rec]['complete_entry'])\n classifier.label(entry)\n final_entries = normalize_labeled_entry(entry.categories)\n entries_json[rec]['labeled_entry'] = final_entries\n if args.mode == 'CRF-print':\n print(entries_json[rec])\n if args.mode == 'CRF':\n with open(os.path.join(root, 'final-entries', page_uuid + '_labeled.json'), 'w') as f:\n for rec in sorted(entries_json.keys()):\n f.write(json.dumps(entries_json[rec]) + '\\n')\n f.close()\n if args.tsv_path != \"False\":\n build_entries_tsv(\n entries_json, args.tsv_path, directory_uuid)\n print(\"Completed processing of \", page_uuid)\n\n except Exception as exception:\n print(exception.__traceback__)\n print(\"Likely ad or problematic hocr in :\", hocr_file, \". 
Skipped.\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Parse hocr files and return entries\")\n parser.add_argument(\"-in\", help=\"Full-path directory containing hocr files\",\n dest=\"path\", type=str, required=True)\n parser.add_argument(\"-build-image\", help=\"Set whether to make images (True/False)\",\n dest=\"make_image\", default=\"False\", type=str, required=True)\n parser.add_argument(\"-jpegs\", help=\"Name of directory (not path) containing jpegs\",\n dest=\"jpeg_directory\", type=str, required=False)\n parser.add_argument(\"-bbox-out\", help=\"Full path to directory to place output bbox images\",\n dest=\"bbox_location\", type=str, required=False)\n parser.add_argument(\"-mode\", help=\"Either (P)rint out extracted entries, apply (CRF-print) and print out entries, or (CRF) and save JSON entries in labeled-json directory\",\n dest=\"mode\", type=str, required=True)\n parser.add_argument(\"-path-training\", help=\"Path to the training files for CRF classifer\",\n dest=\"crf_training_path\", type=str, required=False)\n parser.add_argument(\"-path-out\", help=\"Path to the training files for CRF classifer\",\n dest=\"path_out\", type=str, required=False)\n parser.add_argument(\"-build-tsv\", help=\"(False) or path to directory where tsv will be made\",\n dest=\"tsv_path\", type=str, required=False)\n parser.set_defaults(func=build_entries)\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "sklearn.cluster.KMeans" ] ]
Ben-Foxman/z-quantum-qcbm
[ "6e0aff5283f4a1b2b788317c3a12f8ef8c562a29" ]
[ "src/python/zquantum/qcbm/ansatz.py" ]
[ "import numpy as np\nimport sympy\nfrom zquantum.core.circuit import Circuit, Qubit, Gate, create_layer_of_gates\nfrom zquantum.core.interfaces.ansatz import Ansatz\nfrom zquantum.core.interfaces.ansatz_utils import (\n ansatz_property,\n invalidates_parametrized_circuit,\n)\nfrom typing import Optional, List\nfrom .ansatz_utils import get_entangling_layer\n\nfrom overrides import overrides\n\n\nclass QCBMAnsatz(Ansatz):\n\n supports_parametrized_circuits = True\n number_of_qubits = ansatz_property(\"number_of_qubits\")\n topology = ansatz_property(\"topology\")\n\n def __init__(\n self, number_of_layers: int, number_of_qubits: int, topology: str = \"all\",\n ):\n \"\"\"\n An ansatz implementation used for running the Quantum Circuit Born Machine.\n\n Args:\n number_of_layers (int): number of entangling layers in the circuit.\n number_of_qubits (int): number of qubits in the circuit.\n topology (str): the topology representing the connectivity of the qubits.\n\n Attributes:\n number_of_qubits (int): See Args\n number_of_layers (int): See Args\n topology (str): See Args\n number_of_params: number of the parameters that need to be set for the ansatz circuit.\n \"\"\"\n super().__init__(number_of_layers)\n self._number_of_qubits = number_of_qubits\n self._topology = topology\n if number_of_layers == 0:\n raise ValueError(\"QCBMAnsatz is only defined for number_of_layers > 0.\")\n\n @property\n def number_of_params(self) -> int:\n \"\"\"\n Returns number of parameters in the ansatz.\n \"\"\"\n return np.sum(self.get_number_of_parameters_by_layer())\n\n @property\n def n_params_per_ent_layer(self) -> int:\n if self.topology == \"all\":\n return int((self.number_of_qubits * (self.number_of_qubits - 1)) / 2)\n elif self.topology == \"line\":\n return self.number_of_qubits - 1\n else:\n raise RuntimeError(\"Topology {} is not supported\".format(self.topology))\n\n @overrides\n def _generate_circuit(self, params: Optional[np.ndarray] = None) -> Circuit:\n \"\"\"Builds a qcbm ansatz circuit, using the ansatz in https://advances.sciencemag.org/content/5/10/eaaw9918/tab-pdf (Fig.2 - top).\n\n Args:\n params (numpy.array): input parameters of the circuit (1d array).\n\n Returns:\n Circuit\n \"\"\"\n if params is None:\n params = np.asarray(\n [sympy.Symbol(\"theta_{}\".format(i)) for i in range(self.number_of_params)]\n )\n\n assert len(params) == self.number_of_params\n\n if self.number_of_layers == 1:\n # Only one layer, should be a single layer of rotations with Rx\n return create_layer_of_gates(self.number_of_qubits, \"Rx\", params)\n\n circuit = Circuit()\n parameter_index = 0\n for layer_index in range(self.number_of_layers):\n if layer_index == 0:\n # First layer is always 2 single qubit rotations on Rx Rz\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[parameter_index : parameter_index + self.number_of_qubits],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rz\",\n params[\n parameter_index\n + self.number_of_qubits : parameter_index\n + 2 * self.number_of_qubits\n ],\n )\n parameter_index += 2 * self.number_of_qubits\n elif (\n self.number_of_layers % 2 == 1\n and layer_index == self.number_of_layers - 1\n ):\n # Last layer for odd number of layers is rotations on Rx Rz\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rz\",\n params[parameter_index : parameter_index + self.number_of_qubits],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[\n parameter_index\n + self.number_of_qubits : 
parameter_index\n + 2 * self.number_of_qubits\n ],\n )\n parameter_index += 2 * self.number_of_qubits\n elif (\n self.number_of_layers % 2 == 0\n and layer_index == self.number_of_layers - 2\n ):\n # Even number of layers, second to last layer is 3 rotation layer with Rx Rz Rx\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[parameter_index : parameter_index + self.number_of_qubits],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rz\",\n params[\n parameter_index\n + self.number_of_qubits : parameter_index\n + 2 * self.number_of_qubits\n ],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[\n parameter_index\n + 2 * self.number_of_qubits : parameter_index\n + 3 * self.number_of_qubits\n ],\n )\n parameter_index += 3 * self.number_of_qubits\n elif (\n self.number_of_layers % 2 == 1\n and layer_index == self.number_of_layers - 3\n ):\n # Odd number of layers, third to last layer is 3 rotation layer with Rx Rz Rx\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[parameter_index : parameter_index + self.number_of_qubits],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rz\",\n params[\n parameter_index\n + self.number_of_qubits : parameter_index\n + 2 * self.number_of_qubits\n ],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[\n parameter_index\n + 2 * self.number_of_qubits : parameter_index\n + 3 * self.number_of_qubits\n ],\n )\n parameter_index += 3 * self.number_of_qubits\n elif layer_index % 2 == 1:\n # Currently on an entangling layer\n circuit += get_entangling_layer(\n params[\n parameter_index : parameter_index + self.n_params_per_ent_layer\n ],\n self.number_of_qubits,\n \"XX\",\n self.topology,\n )\n parameter_index += self.n_params_per_ent_layer\n else:\n # A normal single qubit rotation layer of Rx Rz\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rx\",\n params[parameter_index : parameter_index + self.number_of_qubits],\n )\n circuit += create_layer_of_gates(\n self.number_of_qubits,\n \"Rz\",\n params[\n parameter_index\n + self.number_of_qubits : parameter_index\n + 2 * self.number_of_qubits\n ],\n )\n parameter_index += 2 * self.number_of_qubits\n\n return circuit\n\n def get_number_of_parameters_by_layer(self) -> np.ndarray:\n \"\"\"Determine the number of parameters needed for each layer in the ansatz\n\n Returns:\n A 1D array of integers \n \"\"\"\n if self.number_of_layers == 1:\n # If only one layer, then only need parameters for a single layer of Rx gates\n return np.asarray([self.number_of_qubits])\n\n num_params_by_layer = []\n for layer_index in range(self.number_of_layers):\n if layer_index == 0:\n # First layer is always 2 parameters per qubit for 2 single qubit rotations\n num_params_by_layer.append(self.number_of_qubits * 2)\n elif (\n self.number_of_layers % 2 == 1\n and layer_index == self.number_of_layers - 1\n ):\n # Last layer for odd number of layers is 2 layer rotations\n num_params_by_layer.append(self.number_of_qubits * 2)\n elif (\n self.number_of_layers % 2 == 0\n and layer_index == self.number_of_layers - 2\n ):\n # Even number of layers, second to last layer is 3 rotation layer\n num_params_by_layer.append(self.number_of_qubits * 3)\n elif (\n self.number_of_layers % 2 == 1\n and layer_index == self.number_of_layers - 3\n ):\n # Odd number of layers, third to last layer is 3 rotation layer\n num_params_by_layer.append(self.number_of_qubits * 3)\n elif 
layer_index % 2 == 1:\n # Currently on an entangling layer\n num_params_by_layer.append(self.n_params_per_ent_layer)\n else:\n # A normal single qubit rotation layer\n num_params_by_layer.append(self.number_of_qubits * 2)\n\n return np.asarray(num_params_by_layer)" ]
[ [ "numpy.asarray" ] ]
EmilRyberg/P8LH7Grounding
[ "406fdf4ce9afd160df3d7105fedea563a284974b" ]
[ "webots/controllers/ur_controller/kinematics/kinematics.py" ]
[ "import numpy as np\n\nfrom kinematics.dhparameters import DHParameters\n\n\nclass Kinematics:\n def __init__(self):\n self.joint1_dh = DHParameters(0, 0.1625, 0)\n self.joint2_dh = DHParameters(0, 0, np.pi / 2)\n self.joint3_dh = DHParameters(-0.425, 0, 0)\n self.joint4_dh = DHParameters(-0.39225, 0.1333, 0)\n self.joint5_dh = DHParameters(0, 0.0997, np.pi / 2)\n self.joint6_dh = DHParameters(0, 0.0996, -np.pi / 2)\n\n def compute_transformation_matrix(self, theta, dh_params):\n c = np.cos(theta)\n s = np.sin(theta)\n ca = np.cos(dh_params.alpha)\n sa = np.sin(dh_params.alpha)\n A = [[c, -s, 0, dh_params.a],\n [s*ca, c*ca, -sa, -sa*dh_params.d],\n [s*sa, c*sa, ca, ca*dh_params.d],\n [0, 0, 0, 1]]\n A = np.array(A)\n return A\n\n\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.cos" ] ]
omkarudawant/ML-Service
[ "cb612585cbe9619b86c6ea44dd9069a1bc28a8f8" ]
[ "backend/server/apps/ml/income_classifier/extra_trees.py" ]
[ "import joblib\nimport pandas as pd\n\n\nclass ExtraTreesClassifier:\n def __init__(self):\n path_to_artifacts = \"../../research/\"\n self.values_fill_missing = joblib.load(path_to_artifacts +\n \"train_mode.joblib\")\n self.encoders = joblib.load(path_to_artifacts + \"encoders.joblib\")\n self.model = joblib.load(path_to_artifacts + \"extra_trees.joblib\")\n\n def preprocessing(self, input_data):\n # JSON to pandas DataFrame\n input_data = pd.DataFrame(input_data, index=[0])\n # fill missing values\n input_data.fillna(self.values_fill_missing)\n # convert categoricals\n for column in [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]:\n categorical_convert = self.encoders[column]\n input_data[column] = categorical_convert.transform(\n input_data[column])\n\n return input_data\n\n def predict(self, input_data):\n return self.model.predict_proba(input_data)\n\n def postprocessing(self, input_data):\n label = \"<=50K\"\n if input_data[1] > 0.5:\n label = \">50K\"\n return {\"probability\": input_data[1], \"label\": label, \"status\": \"OK\"}\n\n def compute_prediction(self, input_data):\n try:\n input_data = self.preprocessing(input_data)\n prediction = self.predict(input_data)[0] # only one sample\n prediction = self.postprocessing(prediction)\n except Exception as e:\n return {\"status\": \"Error\", \"message\": str(e)}\n\n return prediction" ]
[ [ "pandas.DataFrame" ] ]
rlupat/moanna
[ "81e97b95033a2c1004429c17c41d1c660e62bc83" ]
[ "moanna/helper/SurvivalHelperFunctions.py" ]
[ "def map_label(df, column, mapping):\n df[column] = df[column].replace(mapping)\n return df\n\ndef map_mult_labels(df, columns, labels):\n for column in columns:\n df = map_label(df, labels[column][0], labels[column][1])\n \n return df\n\n#https://stackoverflow.com/questions/26886653/pandas-create-new-column-based-on-values-from-other-columns-apply-a-function-o\ndef surv_event(row, surv_year):\n if row['Event'] in [\"DECEASED\", \"Recurred/Progressed\", \"PROGRESSION\"]:\n if row['Time'] <= surv_year:\n return 1\n else:\n return 0\n else:\n return 0\n \ndef do_survival(clin_df, type_surv, labels_mapping, surv_year=120, filename=\"survival.png\"): #type=[OS, DFS, RFS]\n import pandas as pd\n import matplotlib.pyplot as plt\n\n \n #header = [\"ERStatus\", \"HER2Status\", \"Pam50Subtype\", \"Age\", \"Time\", \"Event\"]\n header = [\"Pam50Subtype\", \"Time\", \"Event\"]\n df_surv = pd.DataFrame(index=clin_df.index, columns=header)\n #df_surv.ERStatus = clin_df.loc[df_surv.index, ['ERStatus']]\n #df_surv.HER2Status = clin_df.loc[df_surv.index, ['HER2Status']]\n df_surv.Pam50Subtype = clin_df.loc[df_surv.index, ['Pam50Subtype']]\n #df_surv.Age = clin_df.loc[df_surv.index, ['Age']]\n if type_surv == \"OS\":\n df_surv.Time = clin_df.loc[df_surv.index, ['OS_MONTHS']]\n df_surv.Event = clin_df.loc[df_surv.index, ['OS_STATUS']]\n elif type_surv == \"DFS\":\n df_surv.Time = clin_df.loc[df_surv.index, ['DFS_MONTHS']]\n df_surv.Event = clin_df.loc[df_surv.index, ['DFS_STATUS']]\n elif type_surv == \"RFS\":\n df_surv.Time = clin_df.loc[df_surv.index, ['RFS_MONTHS']]\n df_surv.Event = clin_df.loc[df_surv.index, ['RFS_STATUS']] \n \n df_surv[\"SurvEvent\"]=df_surv.apply(lambda row: surv_event(row, surv_year), axis=1)\n df_surv.loc[df_surv['Time']>surv_year, 'SurvTime'] = surv_year\n df_surv.loc[df_surv['Time']<=surv_year, 'SurvTime'] = df_surv['Time']\n\n df_surv_final = df_surv.drop(['Time', 'Event'], axis=1)\n print (df_surv_final.shape)\n print (sum(df_surv_final.SurvTime.isna()))\n df_surv_final = df_surv_final[~df_surv_final.SurvTime.isna()]\n print (sum(df_surv_final.SurvTime.isna()))\n print (df_surv_final.shape)\n \n from lifelines import CoxPHFitter\n cph = CoxPHFitter()\n cph.fit(df_surv_final, duration_col='SurvTime', event_col='SurvEvent')\n cph.print_summary() \n \n #cph.plot()\n #cph.plot_covariate_groups('ERStatus', [3,2,1,0])\n \n from lifelines import KaplanMeierFitter\n kmf = KaplanMeierFitter()\n fig = plt.figure(figsize=(10,10))\n ax = plt.subplot(111)\n for name, grouped_df in df_surv_final.groupby('Pam50Subtype'):\n print (name)\n kmf.fit(grouped_df[\"SurvTime\"], grouped_df[\"SurvEvent\"], label=name)\n kmf.plot(ax=ax, ci_show=False, linewidth=4, color=['firebrick', 'hotpink', 'darkblue', 'aqua'][name])\n \n ax.legend(labels_mapping)\n plt.savefig(filename)\n return cph, kmf " ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.subplot", "pandas.DataFrame", "matplotlib.pyplot.savefig" ] ]
ment911/entsoe-py
[ "67cb4386796c242c112512fbf1f50b68723c861a" ]
[ "entsoe/entsoe.py" ]
[ "import logging\nfrom functools import wraps\nfrom socket import gaierror\nfrom time import sleep\nfrom typing import Union, Optional, Dict\n\nimport pandas as pd\nfrom pandas.tseries.offsets import YearBegin, YearEnd\nimport pytz\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom entsoe.exceptions import InvalidPSRTypeError, InvalidBusinessParameterError\nfrom .exceptions import NoMatchingDataError, PaginationError\nfrom .mappings import Area, NEIGHBOURS, lookup_area\nfrom .misc import year_blocks, day_blocks\nfrom .parsers import parse_prices, parse_loads, parse_generation, \\\n parse_installed_capacity_per_plant, parse_crossborder_flows, \\\n parse_unavailabilities, parse_contracted_reserve, parse_imbalance_prices_zip, \\\n parse_netpositions, parse_procured_balancing_capacity\n\n__title__ = \"entsoe-py\"\n__version__ = \"0.3.8\"\n__author__ = \"EnergieID.be\"\n__license__ = \"MIT\"\n\nURL = 'https://transparency.entsoe.eu/api'\n\n\ndef retry(func):\n \"\"\"Catches connection errors, waits and retries\"\"\"\n\n @wraps(func)\n def retry_wrapper(*args, **kwargs):\n self = args[0]\n error = None\n for _ in range(self.retry_count):\n try:\n result = func(*args, **kwargs)\n except (requests.ConnectionError, gaierror) as e:\n error = e\n print(\"Connection Error, retrying in {} seconds\".format(\n self.retry_delay))\n sleep(self.retry_delay)\n continue\n else:\n return result\n else:\n raise error\n\n return retry_wrapper\n\n\nclass EntsoeRawClient:\n # noinspection LongLine\n \"\"\"\n Client to perform API calls and return the raw responses API-documentation:\n https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html#_request_methods\n\n Attributions: Parts of the code for parsing Entsoe responses were copied\n from https://github.com/tmrowco/electricitymap\n \"\"\"\n\n def __init__(\n self, api_key: str, session: Optional[requests.Session] = None,\n retry_count: int = 1, retry_delay: int = 0,\n proxies: Optional[Dict] = None, timeout: Optional[int] = None):\n \"\"\"\n Parameters\n ----------\n api_key : str\n session : requests.Session\n retry_count : int\n number of times to retry the call if the connection fails\n retry_delay: int\n amount of seconds to wait between retries\n proxies : dict\n requests proxies\n timeout : int\n \"\"\"\n if api_key is None:\n raise TypeError(\"API key cannot be None\")\n self.api_key = api_key\n if session is None:\n session = requests.Session()\n self.session = session\n self.proxies = proxies\n self.retry_count = retry_count\n self.retry_delay = retry_delay\n self.timeout = timeout\n\n @retry\n def _base_request(self, params: Dict, start: pd.Timestamp,\n end: pd.Timestamp) -> requests.Response:\n \"\"\"\n Parameters\n ----------\n params : dict\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n requests.Response\n \"\"\"\n start_str = self._datetime_to_str(start)\n end_str = self._datetime_to_str(end)\n\n base_params = {\n 'securityToken': self.api_key,\n 'periodStart': start_str,\n 'periodEnd': end_str\n }\n params.update(base_params)\n\n logging.debug(f'Performing request to {URL} with params {params}')\n response = self.session.get(url=URL, params=params,\n proxies=self.proxies, timeout=self.timeout)\n try:\n response.raise_for_status()\n except requests.HTTPError as e:\n soup = BeautifulSoup(response.text, 'html.parser')\n text = soup.find_all('text')\n if len(text):\n error_text = soup.find('text').text\n if 'No matching data found' in error_text:\n raise NoMatchingDataError\n elif \"check 
you request against dependency tables\" in error_text:\n raise InvalidBusinessParameterError\n elif \"is not valid for this area\" in error_text:\n raise InvalidPSRTypeError\n elif 'amount of requested data exceeds allowed limit' in error_text:\n requested = error_text.split(' ')[-2]\n allowed = error_text.split(' ')[-5]\n raise PaginationError(\n f\"The API is limited to {allowed} elements per \"\n f\"request. This query requested for {requested} \"\n f\"documents and cannot be fulfilled as is.\")\n raise e\n else:\n return response\n\n @staticmethod\n def _datetime_to_str(dtm: pd.Timestamp) -> str:\n \"\"\"\n Convert a datetime object to a string in UTC\n of the form YYYYMMDDhh00\n\n Parameters\n ----------\n dtm : pd.Timestamp\n Recommended to use a timezone-aware object!\n If timezone-naive, UTC is assumed\n\n Returns\n -------\n str\n \"\"\"\n if dtm.tzinfo is not None and dtm.tzinfo != pytz.UTC:\n dtm = dtm.tz_convert(\"UTC\")\n fmt = '%Y%m%d%H00'\n ret_str = dtm.strftime(fmt)\n return ret_str\n\n def query_day_ahead_prices(self, country_code: Union[Area, str],\n start: pd.Timestamp, end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A44',\n 'in_Domain': area.code,\n 'out_Domain': area.code\n }\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n \n def query_net_position_dayahead(self, country_code: Union[Area, str],\n start: pd.Timestamp, end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A25', # Allocation result document\n 'businessType': 'B09', # net position\n 'Contract_MarketAgreement.Type': 'A01', # daily\n 'in_Domain': area.code,\n 'out_Domain': area.code\n }\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_load(self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A65',\n 'processType': 'A16',\n 'outBiddingZone_Domain': area.code,\n 'out_Domain': area.code\n }\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_load_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, process_type: str = 'A01') -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n process_type : str\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A65',\n 'processType': process_type,\n 'outBiddingZone_Domain': area.code,\n # 'out_Domain': domain\n }\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_generation_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, process_type: str = 'A01') -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n process_type : str\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 
'documentType': 'A71',\n 'processType': process_type,\n 'in_Domain': area.code,\n }\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_wind_and_solar_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None,\n process_type: str = 'A01', **kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n process_type : str\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A69',\n 'processType': process_type,\n 'in_Domain': area.code,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_generation(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None, **kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A75',\n 'processType': 'A16',\n 'in_Domain': area.code,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_generation_per_plant(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None, **kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A73',\n 'processType': 'A16',\n 'in_Domain': area.code,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_installed_generation_capacity(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A68',\n 'processType': 'A33',\n 'in_Domain': area.code,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_installed_generation_capacity_per_unit(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A71',\n 'processType': 'A33',\n 'in_Domain': area.code,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_crossborder_flows(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: 
pd.Timestamp, **kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A11\", contract_marketagreement_type=None)\n\n def query_scheduled_exchanges(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str],\n start: pd.Timestamp,\n end: pd.Timestamp,\n dayahead: bool = False,\n **kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n dayahead : bool\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n if dayahead:\n contract_marketagreement_type = \"A01\"\n else:\n contract_marketagreement_type = \"A05\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A09\", contract_marketagreement_type=contract_marketagreement_type)\n\n def query_net_transfer_capacity_dayahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A61\", contract_marketagreement_type=\"A01\")\n\n def query_net_transfer_capacity_weekahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A61\", contract_marketagreement_type=\"A02\")\n\n def query_net_transfer_capacity_monthahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A61\", contract_marketagreement_type=\"A03\")\n\n def query_net_transfer_capacity_yearahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n str\n \"\"\"\n return self._query_crossborder(\n country_code_from=country_code_from,\n country_code_to=country_code_to, start=start, end=end,\n doctype=\"A61\", contract_marketagreement_type=\"A04\")\n \n def query_intraday_offered_capacity(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, implicit:bool = True,**kwargs) -> str:\n \"\"\"\n Parameters\n ----------\n country_code_from : 
Area|str\n        country_code_to : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n        implicit: bool (True = implicit - default for most borders. False = explicit - for instance BE-GB)\n\n        Returns\n        -------\n        str\n        \"\"\"\n        return self._query_crossborder(\n            country_code_from=country_code_from,\n            country_code_to=country_code_to, start=start, end=end,\n            doctype=\"A31\", contract_marketagreement_type=\"A07\",\n            auction_type=(\"A01\" if implicit else \"A02\"))\n\n\n    def _query_crossborder(\n            self, country_code_from: Union[Area, str],\n            country_code_to: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp, doctype: str,\n            contract_marketagreement_type: Optional[str] = None,\n            auction_type: Optional[str] = None) -> str:\n        \"\"\"\n        Generic function called by query_crossborder_flows,\n        query_scheduled_exchanges, query_net_transfer_capacity_DA/WA/MA/YA\n        and query_intraday_offered_capacity.\n\n        Parameters\n        ----------\n        country_code_from : Area|str\n        country_code_to : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n        doctype: str\n        contract_marketagreement_type: str\n\n        Returns\n        -------\n        str\n        \"\"\"\n        area_in = lookup_area(country_code_to)\n        area_out = lookup_area(country_code_from)\n\n        params = {\n            'documentType': doctype,\n            'in_Domain': area_in.code,\n            'out_Domain': area_out.code\n        }\n        if contract_marketagreement_type is not None:\n            params[\n                'contract_MarketAgreement.Type'] = contract_marketagreement_type\n        if auction_type is not None:\n            params[\n                'Auction.Type'] = auction_type\n\n        response = self._base_request(params=params, start=start, end=end)\n        return response.text\n\n    def query_imbalance_prices(\n            self, country_code: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp, psr_type: Optional[str] = None) -> bytes:\n        \"\"\"\n        Parameters\n        ----------\n        country_code : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n        psr_type : str\n            filter query for a specific psr type\n\n        Returns\n        -------\n        bytes\n        \"\"\"\n        area = lookup_area(country_code)\n        params = {\n            'documentType': 'A85',\n            'controlArea_Domain': area.code,\n        }\n        if psr_type:\n            params.update({'psrType': psr_type})\n        response = self._base_request(params=params, start=start, end=end)\n        return response.content\n\n    def query_procured_balancing_capacity(\n            self, country_code: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp, process_type: str,\n            type_marketagreement_type: Optional[str] = None) -> bytes:\n        \"\"\"\n        Procured Balancing Capacity [17.1.B&C]\n        Parameters\n        ----------\n        country_code : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n        process_type : str\n            A51 ... aFRR; A47 ... 
mFRR\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n\n Returns\n -------\n bytes\n \"\"\"\n if process_type not in ['A51', 'A47']:\n raise ValueError('processType allowed values: A51, A47')\n\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A15',\n 'area_Domain': area.code,\n 'processType': process_type\n }\n if type_marketagreement_type:\n params.update({'type_MarketAgreement.Type': type_marketagreement_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.content\n\n def query_activated_balancing_energy(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, business_type: str, \n psr_type: Optional[str] = None) -> bytes:\n \"\"\"\n Activated Balancing Energy [17.1.E]\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n business_type : str\n type of contract (see mappings.BSNTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n bytes\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A83',\n 'controlArea_Domain': area.code,\n 'businessType': business_type\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.content\n\n def query_contracted_reserve_prices(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, type_marketagreement_type: str,\n psr_type: Optional[str] = None) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A89',\n 'controlArea_Domain': area.code,\n 'type_MarketAgreement.Type': type_marketagreement_type,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def query_contracted_reserve_amount(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, type_marketagreement_type: str,\n psr_type: Optional[str] = None) -> str:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n str\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': 'A81',\n 'controlArea_Domain': area.code,\n 'type_MarketAgreement.Type': type_marketagreement_type,\n }\n if psr_type:\n params.update({'psrType': psr_type})\n response = self._base_request(params=params, start=start, end=end)\n return response.text\n\n def _query_unavailability(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, doctype: str, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> bytes:\n \"\"\"\n Generic unavailibility query method.\n This endpoint serves ZIP files.\n The query is limited to 200 items per request.\n\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n doctype : str\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n 
periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n bytes\n \"\"\"\n area = lookup_area(country_code)\n params = {\n 'documentType': doctype,\n 'biddingZone_domain': area.code\n # ,'businessType': 'A53 (unplanned) | A54 (planned)'\n }\n if docstatus:\n params['docStatus'] = docstatus\n if periodstartupdate and periodendupdate:\n params['periodStartUpdate'] = self._datetime_to_str(\n periodstartupdate)\n params['periodEndUpdate'] = self._datetime_to_str(periodendupdate)\n response = self._base_request(params=params, start=start, end=end)\n return response.content\n\n def query_unavailability_of_generation_units(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> bytes:\n \"\"\"\n This endpoint serves ZIP files.\n The query is limited to 200 items per request.\n\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n bytes\n \"\"\"\n content = self._query_unavailability(\n country_code=country_code, start=start, end=end, doctype=\"A80\",\n docstatus=docstatus, periodstartupdate=periodstartupdate,\n periodendupdate=periodendupdate)\n return content\n\n def query_unavailability_of_production_units(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> bytes:\n \"\"\"\n This endpoint serves ZIP files.\n The query is limited to 200 items per request.\n\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n bytes\n \"\"\"\n content = self._query_unavailability(\n country_code=country_code, start=start, end=end, doctype=\"A77\",\n docstatus=docstatus, periodstartupdate=periodstartupdate,\n periodendupdate=periodendupdate)\n return content\n\n def query_unavailability_transmission(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None, **kwargs) -> bytes:\n \"\"\"\n Generic unavailibility query method.\n This endpoint serves ZIP files.\n The query is limited to 200 items per request.\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n bytes\n \"\"\"\n area_in = lookup_area(country_code_to)\n area_out = lookup_area(country_code_from)\n params = {\n 'documentType': \"A78\",\n 'in_Domain': area_in.code,\n 'out_Domain': area_out.code\n }\n if docstatus:\n params['docStatus'] = docstatus\n if periodstartupdate and periodendupdate:\n params['periodStartUpdate'] = self._datetime_to_str(\n periodstartupdate)\n params['periodEndUpdate'] = self._datetime_to_str(periodendupdate)\n response = self._base_request(params=params, start=start, end=end)\n return response.content\n\n def 
query_withdrawn_unavailability_of_generation_units(\n            self, country_code: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp) -> bytes:\n        \"\"\"\n        Parameters\n        ----------\n        country_code : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n\n        Returns\n        -------\n        bytes\n        \"\"\"\n        content = self._query_unavailability(\n            country_code=country_code, start=start, end=end,\n            doctype=\"A80\", docstatus='A13')\n        return content\n\n\ndef paginated(func):\n    \"\"\"Catches a PaginationError, splits the requested period in two and\n    tries again. Finally, it concatenates the results.\"\"\"\n\n    @wraps(func)\n    def pagination_wrapper(*args, start, end, **kwargs):\n        try:\n            df = func(*args, start=start, end=end, **kwargs)\n        except PaginationError:\n            pivot = start + (end - start) / 2\n            df1 = pagination_wrapper(*args, start=start, end=pivot, **kwargs)\n            df2 = pagination_wrapper(*args, start=pivot, end=end, **kwargs)\n            df = pd.concat([df1, df2])\n        return df\n\n    return pagination_wrapper\n\n\ndef year_limited(func):\n    \"\"\"Deals with calls where you cannot query more than a year, by splitting\n    the call up in blocks per year\"\"\"\n\n    @wraps(func)\n    def year_wrapper(*args, start, end, **kwargs):\n        blocks = year_blocks(start, end)\n        frames = []\n        for _start, _end in blocks:\n            try:\n                frame = func(*args, start=_start, end=_end, **kwargs)\n            except NoMatchingDataError:\n                logging.debug(f\"NoMatchingDataError: between {_start} and {_end}\")\n                frame = None\n            frames.append(frame)\n\n        if sum([f is None for f in frames]) == len(frames):\n            # all blocks returned no matching data\n            raise NoMatchingDataError\n\n        df = pd.concat(frames, sort=True)\n        df = df.loc[~df.index.duplicated(keep='first')]\n        return df\n\n    return year_wrapper\n\n\ndef day_limited(func):\n    \"\"\"Deals with calls where you cannot query more than a day, by splitting\n    the call up in blocks per day\"\"\"\n\n    @wraps(func)\n    def day_wrapper(*args, start, end, **kwargs):\n        blocks = day_blocks(start, end)\n        frames = []\n        for _start, _end in blocks:\n            try:\n                frame = func(*args, start=_start, end=_end, **kwargs)\n            except NoMatchingDataError:\n                print(f\"NoMatchingDataError: between {_start} and {_end}\")\n                frame = None\n            frames.append(frame)\n\n        if sum([f is None for f in frames]) == len(frames):\n            # all blocks returned no matching data\n            raise NoMatchingDataError\n\n        df = pd.concat(frames)\n        return df\n\n    return day_wrapper\n\n\nclass EntsoePandasClient(EntsoeRawClient):\n    @year_limited\n    def query_net_position_dayahead(self, country_code: Union[Area, str],\n                                    start: pd.Timestamp, end: pd.Timestamp) -> pd.Series:\n        \"\"\"\n        Parameters\n        ----------\n        country_code : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n\n        Returns\n        -------\n        pd.Series\n        \"\"\"\n        area = lookup_area(country_code)\n        text = super(EntsoePandasClient, self).query_net_position_dayahead(\n            country_code=area, start=start, end=end)\n        series = parse_netpositions(text)\n        series = series.tz_convert(area.tz)\n        series = series.truncate(before=start, after=end)\n        return series\n\n    @year_limited\n    def query_day_ahead_prices(\n            self, country_code: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp) -> pd.Series:\n        \"\"\"\n        Parameters\n        ----------\n        country_code : Area|str\n        start : pd.Timestamp\n        end : pd.Timestamp\n\n        Returns\n        -------\n        pd.Series\n        \"\"\"\n        area = lookup_area(country_code)\n        text = super(EntsoePandasClient, self).query_day_ahead_prices(\n            country_code=area, start=start, end=end)\n        series = parse_prices(text)\n        series = series.tz_convert(area.tz)\n        series = series.truncate(before=start, after=end)\n        return series\n\n    
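# --- illustrative usage sketch (added commentary, not original code):\n    # client = EntsoePandasClient(api_key='...')   # placeholder API key\n    # start = pd.Timestamp('20190101', tz='Europe/Brussels')\n    # end = pd.Timestamp('20190201', tz='Europe/Brussels')\n    # prices = client.query_day_ahead_prices('BE', start=start, end=end)\n    #\n    # @year_limited splits [start, end) into calendar-year blocks and\n    # concatenates per-block results, dropping duplicated index entries;\n    # @paginated instead bisects the requested period on PaginationError.\n\n    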
@year_limited\n def query_load(self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> pd.Series:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_load(\n country_code=area, start=start, end=end)\n series = parse_loads(text)\n series = series.tz_convert(area.tz)\n series = series.truncate(before=start, after=end)\n return series\n\n @year_limited\n def query_load_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, process_type: str = 'A01') -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n process_type : str\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_load_forecast(\n country_code=area, start=start, end=end, process_type=process_type)\n\n df = parse_loads(text, process_type=process_type)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n\n @year_limited\n def query_generation_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, process_type: str = 'A01',\n nett: bool = False) -> Union[pd.DataFrame, pd.Series]:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n process_type : str\n nett : bool\n condense generation and consumption into a nett number\n\n Returns\n -------\n pd.DataFrame | pd.Series\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_generation_forecast(\n country_code=area, start=start, end=end, process_type=process_type)\n df = parse_generation(text, nett=nett)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n def query_wind_and_solar_forecast(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None,\n process_type: str = 'A01', **kwargs) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n process_type : str\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_wind_and_solar_forecast(\n country_code=area, start=start, end=end, psr_type=psr_type,\n process_type=process_type)\n df = parse_generation(text, nett=True)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n def query_generation(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None,\n nett: bool = False, **kwargs) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n nett : bool\n condense generation and consumption into a nett number\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_generation(\n country_code=area, start=start, end=end, psr_type=psr_type)\n df = parse_generation(text, nett=nett)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n def query_installed_generation_capacity(\n 
self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(\n EntsoePandasClient, self).query_installed_generation_capacity(\n country_code=area, start=start, end=end, psr_type=psr_type)\n df = parse_generation(text)\n df = df.tz_convert(area.tz)\n # Truncate to YearBegin and YearEnd, because answer is always year-based\n df = df.truncate(before=start - YearBegin(), after=end + YearEnd())\n return df\n\n @year_limited\n def query_installed_generation_capacity_per_unit(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(\n EntsoePandasClient,\n self).query_installed_generation_capacity_per_unit(\n country_code=area, start=start, end=end, psr_type=psr_type)\n df = parse_installed_capacity_per_plant(text)\n return df\n\n @year_limited\n def query_crossborder_flows(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_crossborder_flows(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n\n @year_limited\n def query_scheduled_exchanges(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str],\n start: pd.Timestamp,\n end: pd.Timestamp,\n dayahead: bool = False,\n **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n dayahead : bool\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_scheduled_exchanges(\n country_code_from=area_from,\n country_code_to=area_to,\n dayahead=dayahead,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n\n @year_limited\n def query_net_transfer_capacity_dayahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = 
lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_net_transfer_capacity_dayahead(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n\n @year_limited\n def query_net_transfer_capacity_weekahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_net_transfer_capacity_weekahead(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n\n @year_limited\n def query_net_transfer_capacity_monthahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_net_transfer_capacity_monthahead(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n \n @year_limited\n def query_net_transfer_capacity_yearahead(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_net_transfer_capacity_yearahead(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n\n @year_limited\n def query_intraday_offered_capacity(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, implicit:bool = True, **kwargs) -> pd.Series:\n \"\"\"\n Note: Result will be in the timezone of the origin country --> to check\n\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n implicit: bool (True = implicit - default for most borders. 
False = explicit - for instance BE-GB)\n Returns\n -------\n pd.Series\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n text = super(EntsoePandasClient, self).query_intraday_offered_capacity(\n country_code_from=area_from,\n country_code_to=area_to,\n start=start,\n end=end,\n implicit=implicit)\n ts = parse_crossborder_flows(text)\n ts = ts.tz_convert(area_from.tz)\n ts = ts.truncate(before=start, after=end)\n return ts\n \n @year_limited\n def query_imbalance_prices(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n archive = super(EntsoePandasClient, self).query_imbalance_prices(\n country_code=area, start=start, end=end, psr_type=psr_type)\n df = parse_imbalance_prices_zip(zip_contents=archive)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n @paginated\n def query_procured_balancing_capacity(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, process_type: str,\n type_marketagreement_type: Optional[str] = None) -> bytes:\n \"\"\"\n Activated Balancing Energy [17.1.E]\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n process_type : str\n A51 ... aFRR; A47 ... mFRR\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_procured_balancing_capacity(\n country_code=area, start=start, end=end,\n process_type=process_type, type_marketagreement_type=type_marketagreement_type)\n df = parse_procured_balancing_capacity(text, area.tz)\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n def query_activated_balancing_energy(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, business_type: str, \n psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Activated Balancing Energy [17.1.E]\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n business_type: str\n type of contract (see mappings.BSNTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_activated_balancing_energy(\n country_code=area, start=start, end=end, \n business_type=business_type, psr_type=psr_type)\n df = parse_contracted_reserve(text, area.tz, \"quantity\")\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n \n @year_limited\n @paginated\n def query_contracted_reserve_prices(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, type_marketagreement_type: str,\n psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area, str\n start : pd.Timestamp\n end : pd.Timestamp\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = 
super(EntsoePandasClient, self).query_contracted_reserve_prices(\n country_code=area, start=start, end=end,\n type_marketagreement_type=type_marketagreement_type,\n psr_type=psr_type)\n df = parse_contracted_reserve(text, area.tz, \"procurement_price.amount\")\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n @paginated\n def query_contracted_reserve_amount(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, type_marketagreement_type: str,\n psr_type: Optional[str] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n type_marketagreement_type : str\n type of contract (see mappings.MARKETAGREEMENTTYPE)\n psr_type : str\n filter query for a specific psr type\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_contracted_reserve_amount(\n country_code=area, start=start, end=end,\n type_marketagreement_type=type_marketagreement_type,\n psr_type=psr_type)\n df = parse_contracted_reserve(text, area.tz, \"quantity\")\n df = df.tz_convert(area.tz)\n df = df.truncate(before=start, after=end)\n return df\n\n @year_limited\n @paginated\n def _query_unavailability(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, doctype: str, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n doctype : str\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n content = super(EntsoePandasClient, self)._query_unavailability(\n country_code=area, start=start, end=end, doctype=doctype,\n docstatus=docstatus, periodstartupdate=periodstartupdate,\n periodendupdate=periodendupdate)\n df = parse_unavailabilities(content, doctype)\n df = df.tz_convert(area.tz)\n df['start'] = df['start'].apply(lambda x: x.tz_convert(area.tz))\n df['end'] = df['end'].apply(lambda x: x.tz_convert(area.tz))\n df = df.truncate(before=start, after=end)\n return df\n\n def query_unavailability_of_generation_units(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n df = self._query_unavailability(\n country_code=country_code, start=start, end=end, doctype=\"A80\",\n docstatus=docstatus, periodstartupdate=periodstartupdate,\n periodendupdate=periodendupdate)\n return df\n\n def query_unavailability_of_production_units(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n 
periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n df = self._query_unavailability(\n country_code=country_code, start=start, end=end, doctype=\"A77\",\n docstatus=docstatus, periodstartupdate=periodstartupdate,\n periodendupdate=periodendupdate)\n return df\n\n @paginated\n def query_unavailability_transmission(\n self, country_code_from: Union[Area, str],\n country_code_to: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, docstatus: Optional[str] = None,\n periodstartupdate: Optional[pd.Timestamp] = None,\n periodendupdate: Optional[pd.Timestamp] = None,\n **kwargs) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code_from : Area|str\n country_code_to : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n docstatus : str, optional\n periodstartupdate : pd.Timestamp, optional\n periodendupdate : pd.Timestamp, optional\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area_to = lookup_area(country_code_to)\n area_from = lookup_area(country_code_from)\n content = super(EntsoePandasClient,\n self).query_unavailability_transmission(\n area_from, area_to, start, end, docstatus, periodstartupdate,\n periodendupdate)\n df = parse_unavailabilities(content, \"A78\")\n df = df.tz_convert(area_from.tz)\n df['start'] = df['start'].apply(lambda x: x.tz_convert(area_from.tz))\n df['end'] = df['end'].apply(lambda x: x.tz_convert(area_from.tz))\n df = df.truncate(before=start, after=end)\n return df\n\n def query_withdrawn_unavailability_of_generation_units(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n df = self.query_unavailability_of_generation_units(\n country_code=country_code, start=start, end=end, docstatus='A13')\n df = df.truncate(before=start, after=end)\n return df\n\n @day_limited\n def query_generation_per_plant(\n self, country_code: Union[Area, str], start: pd.Timestamp,\n end: pd.Timestamp, psr_type: Optional[str] = None,\n include_eic: bool = False,\n nett: bool = False, **kwargs) -> pd.DataFrame:\n \"\"\"\n Parameters\n ----------\n country_code : Area|str\n start : pd.Timestamp\n end : pd.Timestamp\n psr_type : str\n filter on a single psr type\n nett : bool\n condense generation and consumption into a nett number\n include_eic: bool\n if True also include the eic code in the output\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n area = lookup_area(country_code)\n text = super(EntsoePandasClient, self).query_generation_per_plant(\n country_code=area, start=start, end=end, psr_type=psr_type)\n df = parse_generation(text, per_plant=True, include_eic=include_eic)\n df.columns = df.columns.set_levels(df.columns.levels[0].str.encode('latin-1').str.decode('utf-8'), level=0)\n df = df.tz_convert(area.tz)\n # Truncation will fail if data is not sorted along the index in rare\n # cases. 
Ensure the dataframe is sorted:\n        df = df.sort_index()\n        df = df.truncate(before=start, after=end)\n        return df\n\n    def query_import(self, country_code: Union[Area, str], start: pd.Timestamp,\n                     end: pd.Timestamp) -> pd.DataFrame:\n        \"\"\"\n        Adds together all incoming cross-border flows to a country.\n        The neighbours of a country are given by the NEIGHBOURS mapping.\n        \"\"\"\n        area = lookup_area(country_code)\n        imports = []\n        for neighbour in NEIGHBOURS[area.name]:\n            try:\n                im = self.query_crossborder_flows(country_code_from=neighbour,\n                                                  country_code_to=country_code,\n                                                  end=end,\n                                                  start=start,\n                                                  lookup_bzones=True)\n            except NoMatchingDataError:\n                continue\n            im.name = neighbour\n            imports.append(im)\n        df = pd.concat(imports, axis=1)\n        # drop columns that contain only zeros\n        df = df.loc[:, (df != 0).any(axis=0)]\n        df = df.tz_convert(area.tz)\n        df = df.truncate(before=start, after=end)\n        return df\n\n    def query_generation_import(\n            self, country_code: Union[Area, str], start: pd.Timestamp,\n            end: pd.Timestamp) -> pd.DataFrame:\n        \"\"\"Query the combination of both domestic generation and imports\"\"\"\n        generation = self.query_generation(country_code=country_code, end=end,\n                                           start=start, lookup_bzones=True)\n        # drop columns that contain only zeros\n        generation = generation.loc[:, (generation != 0).any(axis=0)]\n        generation = generation.resample('H').sum()\n        imports = self.query_import(country_code=country_code, start=start,\n                                    end=end)\n\n        data = {'Generation': generation, 'Import': imports}\n        df = pd.concat(data.values(), axis=1, keys=data.keys())\n        df = df.truncate(before=start, after=end)\n        return df\n\n" ]
[ [ "pandas.tseries.offsets.YearBegin", "pandas.tseries.offsets.YearEnd", "pandas.concat" ] ]
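A minimal usage sketch for the ENTSO-E client code in the record above. The import path, API key and area codes are illustrative assumptions; the call signatures (tz-aware pd.Timestamp bounds, Area-or-string country codes) follow the code field itself.

    import pandas as pd
    from entsoe import EntsoePandasClient  # assumed import path

    client = EntsoePandasClient(api_key="YOUR_API_KEY")  # placeholder key
    start = pd.Timestamp("20190101", tz="Europe/Brussels")
    end = pd.Timestamp("20190201", tz="Europe/Brussels")

    # returns a tz-converted pd.Series truncated to [start, end]
    prices = client.query_day_ahead_prices("BE", start=start, end=end)

    # cross-border results are indexed in the origin country's timezone
    flows = client.query_crossborder_flows("DE_LU", "FR", start=start, end=end)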
noemiefedon/BELLA
[ "ca86e5cd6f593478235c64aa4d0409b0e78dbcbb" ]
[ "src/BELLA/results.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nClass for the results of an optimisation with BELLA\r\nwith several ply-drop layout\r\n\"\"\"\r\n__version__ = '2.0'\r\n__author__ = 'Noemie Fedon'\r\n\r\nimport numpy as np\r\n\r\n#import sys\r\n#sys.path.append(r'C:\\BELLA')\r\n#from src.divers.pretty_print import print_lampam, print_ss, print_list_ss\r\n\r\nclass BELLA_Results():\r\n \" An object for storing the results of an optimisation with BELLA\"\r\n\r\n def __init__(self, constraints, multipanel, parameters=None):\r\n \"Initialise the results of an optimisation with BELLA\"\r\n\r\n if parameters is None:\r\n n_ini_ply_drops = 1\r\n else:\r\n n_ini_ply_drops = parameters.n_ini_ply_drops\r\n\r\n self.penalty_spacing_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), dtype=float)\r\n\r\n self.obj_constraints_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), dtype=float)\r\n\r\n self.obj_no_constraints_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.penalty_contig_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.penalty_diso_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.penalty_10_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.penalty_bal_ipo_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.penalty_oopo_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=float)\r\n\r\n self.n_contig_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=int)\r\n\r\n self.n_diso_tab = np.NaN*np.ones((\r\n n_ini_ply_drops, multipanel.n_panels), dtype=int)\r\n\r\n self.n_obj_func_calls_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.n_designs_last_level_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.n_designs_after_ss_ref_repair_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.n_designs_after_thick_to_thin_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.n_designs_after_thin_to_thick_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.n_designs_repaired_unique_tab = np.NaN*np.ones((\r\n n_ini_ply_drops,), int)\r\n\r\n self.lampam_tab_tab = np.NaN*np.zeros((\r\n multipanel.n_panels, n_ini_ply_drops, 12), float)\r\n\r\n self.n_plies_per_angle_tab = np.NaN*np.zeros((\r\n n_ini_ply_drops, multipanel.n_panels,\r\n constraints.n_set_of_angles), float)\r\n\r\n # Initialisation of the array storing all the best stacking sequence\r\n # solutions: ss_void\r\n ss_void = []\r\n for panel in multipanel.panels:\r\n ss_void.append(np.zeros((panel.n_plies,), dtype=int))\r\n # Initialisation of the array storing all the stacking sequence solutions:\r\n # ss_tab\r\n self.ss_tab = [[]]*(n_ini_ply_drops)\r\n for outer_step in range(n_ini_ply_drops):\r\n self.ss_tab[outer_step] = ss_void\r\n # Initialisation of the array storing all the stacking sequence tables:\r\n # ss_tab_tab\r\n if constraints.sym \\\r\n and multipanel.n_plies_max % 2 == 0 \\\r\n and sum([p.middle_ply_index for p in multipanel.panels]) != 0:\r\n self.ss_tab_tab = np.zeros((\r\n n_ini_ply_drops,\r\n multipanel.n_panels,\r\n multipanel.n_plies_max + 1), dtype=int)\r\n else:\r\n self.ss_tab_tab = np.zeros((\r\n n_ini_ply_drops,\r\n multipanel.n_panels,\r\n multipanel.n_plies_max), dtype=int)\r\n\r\n def update(self, outer_step, results_one_pdl):\r\n \"Update the results from an optimisation with one ply-drop layout\"\r\n if results_one_pdl is not 
None:\r\n self.ss_tab[outer_step] = results_one_pdl.ss\r\n self.ss_tab_tab[outer_step] = results_one_pdl.sst\r\n \r\n self.lampam_tab_tab[:, outer_step, :] = results_one_pdl.lampam\r\n \r\n self.obj_constraints_tab[\r\n outer_step] = results_one_pdl.obj_constraints\r\n self.obj_no_constraints_tab[\r\n outer_step] = results_one_pdl.obj_no_constraints\r\n \r\n self.penalty_spacing_tab[\r\n outer_step] = results_one_pdl.penalty_spacing\r\n self.penalty_diso_tab[\r\n outer_step] = results_one_pdl.penalty_diso\r\n self.penalty_contig_tab[\r\n outer_step] = results_one_pdl.penalty_contig\r\n self.penalty_10_tab[\r\n outer_step] = results_one_pdl.penalty_10\r\n self.penalty_bal_ipo_tab[\r\n outer_step] = results_one_pdl.penalty_bal_ipo\r\n self.penalty_oopo_tab[\r\n outer_step] = results_one_pdl.penalty_oopo\r\n self.n_diso_tab[outer_step] = results_one_pdl.n_diso\r\n self.n_contig_tab[outer_step] = results_one_pdl.n_contig\r\n \r\n self.n_plies_per_angle_tab[\r\n outer_step] = results_one_pdl.n_plies_per_angle\r\n \r\n self.n_obj_func_calls_tab[\r\n outer_step] = results_one_pdl.n_obj_func_calls\r\n self.n_designs_last_level_tab[\r\n outer_step] = results_one_pdl.n_designs_last_level\r\n self.n_designs_after_ss_ref_repair_tab[\r\n outer_step] = results_one_pdl.n_designs_after_ss_ref_repair\r\n self.n_designs_after_thick_to_thin_tab[\r\n outer_step] = results_one_pdl.n_designs_after_thick_to_thin\r\n self.n_designs_after_thin_to_thick_tab[\r\n outer_step] = results_one_pdl.n_designs_after_thin_to_thick\r\n self.n_designs_repaired_unique_tab[\r\n outer_step] = results_one_pdl.n_designs_repaired_unique\r\n\r\n def __repr__(self):\r\n \" Display object \"\r\n\r\n return '''\r\nResults with BELLA:\r\n '''\r\n\r\nclass BELLA_ResultsOnePdl():\r\n \" An object for storing the results of an optimisation with BELLA\"\r\n\r\n def __init__(self):\r\n \"Initialise the results of an optimisation with BELLA\"\r\n self.ss = None\r\n self.lampam = None\r\n self.n_plies_per_angle = None\r\n self.n_obj_func_calls = None\r\n self.obj_constraints = None\r\n self.obj_no_constraints = None\r\n self.penalty_diso = None\r\n self.penalty_contig = None\r\n self.penalty_10 = None\r\n self.penalty_bal_ipo = None\r\n self.penalty_oopo = None\r\n self.penalty_spacing = None\r\n self.sst = None\r\n self.pdl = None\r\n self.n_designs_last_level = 0\r\n self.n_designs_after_ss_ref_repair = 0\r\n self.n_designs_after_thick_to_thin = 0\r\n self.n_designs_after_thin_to_thick = 0\r\n self.n_designs_repaired_unique = 0\r\n\r\n\r\n def __repr__(self):\r\n \" Display object \"\r\n\r\n return f'''\r\nResults with BELLA:\r\n\r\n***\r\n'''\r\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
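A quick check of the NaN-initialisation pattern used throughout BELLA_Results in the record above (names here are illustrative only). Note that multiplying np.NaN into an integer-dtype array silently promotes the result to float, so the dtype=int arguments in the original do not survive the multiplication.

    import numpy as np

    n = 3
    tab = np.NaN * np.ones((n,), dtype=float)    # array([nan, nan, nan])
    promoted = np.NaN * np.ones((n,), dtype=int)
    print(promoted.dtype)   # float64 -- NaN cannot be stored in an int array
    tab[0] = 1.25           # entries are overwritten as results arrive
    print(np.isnan(tab))    # [False  True  True]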
lao-jiu/Facecluster
[ "1603c99ae9f1f1d1d37f60ea30b20dde6cd4568e" ]
[ "workflow/mtcnn_detector.py" ]
[ "# coding: utf-8\nimport os\nimport mxnet as mx\nimport numpy as np\nimport math\nimport cv2\nfrom multiprocessing import Pool\nfrom itertools import repeat\ntry:\n    from itertools import izip\nexcept ImportError:\n    izip = zip\n\nfrom helper import nms, adjust_input, generate_bbox, detect_first_stage_warpper\n\nclass MtcnnDetector(object):\n    \"\"\"\n        Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Neural Networks\n        see https://github.com/kpzhang93/MTCNN_face_detection_alignment\n        this is an mxnet version\n    \"\"\"\n    def __init__(self,\n                 model_folder='.',\n                 minsize = 12,\n                 threshold = [0.6, 0.7, 0.8],\n                 factor = 0.709,\n                 num_worker = 1,\n                 accurate_landmark = False,\n                 ctx=mx.cpu()):\n        \"\"\"\n            Initialize the detector\n\n            Parameters:\n            ----------\n                model_folder : string\n                    path for the models\n                minsize : float number\n                    minimal face to detect\n                threshold : list of float numbers\n                    detection thresholds for the 3 stages\n                factor: float number\n                    scale factor for image pyramid\n                num_worker: int number\n                    number of processes we use for first stage\n                accurate_landmark: bool\n                    use accurate landmark localization or not\n\n        \"\"\"\n        self.num_worker = num_worker\n        self.accurate_landmark = accurate_landmark\n\n        # load 4 models from folder\n        models = ['det1', 'det2', 'det3', 'det4']\n        models = [os.path.join(model_folder, f) for f in models]\n\n        self.PNets = []\n        for i in range(num_worker):\n            worker_net = mx.model.FeedForward.load(models[0], 1, ctx=ctx)\n            self.PNets.append(worker_net)\n\n        #self.Pool = Pool(num_worker)\n\n        self.RNet = mx.model.FeedForward.load(models[1], 1, ctx=ctx)\n        self.ONet = mx.model.FeedForward.load(models[2], 1, ctx=ctx)\n        self.LNet = mx.model.FeedForward.load(models[3], 1, ctx=ctx)\n\n        self.minsize = float(minsize)\n        self.factor = float(factor)\n        self.threshold = threshold\n\n\n    def convert_to_square(self, bbox):\n        \"\"\"\n            convert bbox to square\n\n        Parameters:\n        ----------\n            bbox: numpy array, shape n x 5\n                input bbox\n\n        Returns:\n        -------\n            square bbox\n        \"\"\"\n        square_bbox = bbox.copy()\n\n        h = bbox[:, 3] - bbox[:, 1] + 1\n        w = bbox[:, 2] - bbox[:, 0] + 1\n        max_side = np.maximum(h,w)\n        square_bbox[:, 0] = bbox[:, 0] + w*0.5 - max_side*0.5\n        square_bbox[:, 1] = bbox[:, 1] + h*0.5 - max_side*0.5\n        square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1\n        square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1\n        return square_bbox\n\n    def calibrate_box(self, bbox, reg):\n        \"\"\"\n            calibrate bboxes\n\n        Parameters:\n        ----------\n            bbox: numpy array, shape n x 5\n                input bboxes\n            reg: numpy array, shape n x 4\n                bbox adjustments\n\n        Returns:\n        -------\n            bboxes after refinement\n\n        \"\"\"\n        w = bbox[:, 2] - bbox[:, 0] + 1\n        w = np.expand_dims(w, 1)\n        h = bbox[:, 3] - bbox[:, 1] + 1\n        h = np.expand_dims(h, 1)\n        reg_m = np.hstack([w, h, w, h])\n        aug = reg_m * reg\n        bbox[:, 0:4] = bbox[:, 0:4] + aug\n        return bbox\n\n\n    def pad(self, bboxes, w, h):\n        \"\"\"\n            pad the bboxes, and also restrict their size\n\n        Parameters:\n        ----------\n            bboxes: numpy array, n x 5\n                input bboxes\n            w: float number\n                width of the input image\n            h: float number\n                height of the input image\n        Returns:\n        -------\n            dy, dx : numpy array, n x 1\n                start point of the bbox in target image\n            edy, edx : numpy array, n x 1\n                end point of the bbox in target image\n            y, x : numpy array, n x 1\n                start point of the bbox in original image\n            ey, ex : numpy array, n x 1\n                end point of the bbox in original image\n            tmph, tmpw: numpy array, n x 1\n                height and width of the bbox\n\n        \"\"\"\n        tmpw, tmph = bboxes[:, 2] - bboxes[:, 
0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1\n num_box = bboxes.shape[0]\n\n dx , dy= np.zeros((num_box, )), np.zeros((num_box, ))\n edx, edy = tmpw.copy()-1, tmph.copy()-1\n\n x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]\n\n tmp_index = np.where(ex > w-1)\n edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]\n ex[tmp_index] = w - 1\n\n tmp_index = np.where(ey > h-1)\n edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]\n ey[tmp_index] = h - 1\n\n tmp_index = np.where(x < 0)\n dx[tmp_index] = 0 - x[tmp_index]\n x[tmp_index] = 0\n\n tmp_index = np.where(y < 0)\n dy[tmp_index] = 0 - y[tmp_index]\n y[tmp_index] = 0\n\n return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]\n return_list = [item.astype(np.int32) for item in return_list]\n\n return return_list\n\n def slice_index(self, number):\n \"\"\"\n slice the index into (n,n,m), m < n\n Parameters:\n ----------\n number: int number\n number\n \"\"\"\n def chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n num_list = range(number)\n return list(chunks(num_list, self.num_worker))\n \n def detect_face_limited(self, img, det_type=2):\n height, width, _ = img.shape\n if det_type>=2:\n total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)\n # (3, 24, 24) is the input shape for RNet\n input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))\n\n output = self.RNet.predict(input_buf)\n\n # filter the total_boxes with threshold\n passed = np.where(output[1][:, 1] > self.threshold[1])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[1][passed, 1].reshape((-1,))\n reg = output[0][passed]\n\n # nms\n pick = nms(total_boxes, 0.7, 'Union')\n total_boxes = total_boxes[pick]\n total_boxes = self.calibrate_box(total_boxes, reg[pick])\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n else:\n total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)\n num_box = total_boxes.shape[0]\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)\n # (3, 48, 48) is the input shape for ONet\n input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)\n tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))\n\n output = self.ONet.predict(input_buf)\n #print(output[2])\n\n # filter the total_boxes with threshold\n passed = np.where(output[2][:, 1] > self.threshold[2])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[2][passed, 1].reshape((-1,))\n reg = output[1][passed]\n points = output[0][passed]\n\n # compute landmark points\n bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n points[:, 0:5] = np.expand_dims(total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]\n points[:, 5:10] = 
np.expand_dims(total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]\n\n # nms\n total_boxes = self.calibrate_box(total_boxes, reg)\n pick = nms(total_boxes, 0.7, 'Min')\n total_boxes = total_boxes[pick]\n points = points[pick]\n \n if not self.accurate_landmark:\n return total_boxes, points\n\n #############################################\n # extended stage\n #############################################\n num_box = total_boxes.shape[0]\n patchw = np.maximum(total_boxes[:, 2]-total_boxes[:, 0]+1, total_boxes[:, 3]-total_boxes[:, 1]+1)\n patchw = np.round(patchw*0.25)\n\n # make it even\n patchw[np.where(np.mod(patchw,2) == 1)] += 1\n\n input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)\n for i in range(5):\n x, y = points[:, i], points[:, i+5]\n x, y = np.round(x-0.5*patchw), np.round(y-0.5*patchw)\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(np.vstack([x, y, x+patchw-1, y+patchw-1]).T,\n width,\n height)\n for j in range(num_box):\n tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)\n tmpim[dy[j]:edy[j]+1, dx[j]:edx[j]+1, :] = img[y[j]:ey[j]+1, x[j]:ex[j]+1, :]\n input_buf[j, i*3:i*3+3, :, :] = adjust_input(cv2.resize(tmpim, (24, 24)))\n\n output = self.LNet.predict(input_buf)\n\n pointx = np.zeros((num_box, 5))\n pointy = np.zeros((num_box, 5))\n\n for k in range(5):\n # do not make a large movement\n tmp_index = np.where(np.abs(output[k]-0.5) > 0.35)\n output[k][tmp_index[0]] = 0.5\n\n pointx[:, k] = np.round(points[:, k] - 0.5*patchw) + output[k][:, 0]*patchw\n pointy[:, k] = np.round(points[:, k+5] - 0.5*patchw) + output[k][:, 1]*patchw\n\n points = np.hstack([pointx, pointy])\n points = points.astype(np.int32)\n\n return total_boxes, points\n\n def detect_face(self, img, det_type=0):\n \"\"\"\n detect face over img\n Parameters:\n ----------\n img: numpy array, bgr order of shape (1, 3, n, m)\n input image\n Retures:\n -------\n bboxes: numpy array, n x 5 (x1,y2,x2,y2,score)\n bboxes\n points: numpy array, n x 10 (x1, x2 ... 
x5, y1, y2 ..y5)\n landmarks\n \"\"\"\n\n # check input\n height, width, _ = img.shape\n if det_type==0:\n MIN_DET_SIZE = 12\n\n if img is None:\n return None\n\n # only works for color image\n if len(img.shape) != 3:\n return None\n\n # detected boxes\n total_boxes = []\n\n minl = min( height, width)\n\n # get all the valid scales\n scales = []\n m = MIN_DET_SIZE/self.minsize\n minl *= m\n factor_count = 0\n while minl > MIN_DET_SIZE:\n scales.append(m*self.factor**factor_count)\n minl *= self.factor\n factor_count += 1\n\n #############################################\n # first stage\n #############################################\n #for scale in scales:\n # return_boxes = self.detect_first_stage(img, scale, 0)\n # if return_boxes is not None:\n # total_boxes.append(return_boxes)\n \n sliced_index = self.slice_index(len(scales))\n total_boxes = []\n for batch in sliced_index:\n #local_boxes = self.Pool.map( detect_first_stage_warpper, \\\n # izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )\n local_boxes = map( detect_first_stage_warpper, \\\n izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )\n total_boxes.extend(local_boxes)\n \n # remove the Nones \n total_boxes = [ i for i in total_boxes if i is not None]\n\n if len(total_boxes) == 0:\n return None\n \n total_boxes = np.vstack(total_boxes)\n\n if total_boxes.size == 0:\n return None\n\n # merge the detection from first stage\n pick = nms(total_boxes[:, 0:5], 0.7, 'Union')\n total_boxes = total_boxes[pick]\n\n bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n\n # refine the bboxes\n total_boxes = np.vstack([total_boxes[:, 0]+total_boxes[:, 5] * bbw,\n total_boxes[:, 1]+total_boxes[:, 6] * bbh,\n total_boxes[:, 2]+total_boxes[:, 7] * bbw,\n total_boxes[:, 3]+total_boxes[:, 8] * bbh,\n total_boxes[:, 4]\n ])\n\n total_boxes = total_boxes.T\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n else:\n total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)\n\n #############################################\n # second stage\n #############################################\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)\n # (3, 24, 24) is the input shape for RNet\n input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)\n\n for i in range(num_box):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]\n input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))\n\n output = self.RNet.predict(input_buf)\n\n # filter the total_boxes with threshold\n passed = np.where(output[1][:, 1] > self.threshold[1])\n total_boxes = total_boxes[passed]\n\n if total_boxes.size == 0:\n return None\n\n total_boxes[:, 4] = output[1][passed, 1].reshape((-1,))\n reg = output[0][passed]\n\n # nms\n pick = nms(total_boxes, 0.7, 'Union')\n total_boxes = total_boxes[pick]\n total_boxes = self.calibrate_box(total_boxes, reg[pick])\n total_boxes = self.convert_to_square(total_boxes)\n total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])\n\n #############################################\n # third stage\n #############################################\n num_box = total_boxes.shape[0]\n\n # pad the bbox\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = 
self.pad(total_boxes, width, height)\n        # (3, 48, 48) is the input shape for ONet\n        input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)\n\n        for i in range(num_box):\n            tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)\n            tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]\n            input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))\n\n        output = self.ONet.predict(input_buf)\n\n        # filter the total_boxes with threshold\n        passed = np.where(output[2][:, 1] > self.threshold[2])\n        total_boxes = total_boxes[passed]\n\n        if total_boxes.size == 0:\n            return None\n\n        total_boxes[:, 4] = output[2][passed, 1].reshape((-1,))\n        reg = output[1][passed]\n        points = output[0][passed]\n\n        # compute landmark points\n        bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1\n        bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1\n        points[:, 0:5] = np.expand_dims(total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]\n        points[:, 5:10] = np.expand_dims(total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]\n\n        # nms\n        total_boxes = self.calibrate_box(total_boxes, reg)\n        pick = nms(total_boxes, 0.7, 'Min')\n        total_boxes = total_boxes[pick]\n        points = points[pick]\n\n        if not self.accurate_landmark:\n            return total_boxes, points\n\n        #############################################\n        # extended stage\n        #############################################\n        num_box = total_boxes.shape[0]\n        patchw = np.maximum(total_boxes[:, 2]-total_boxes[:, 0]+1, total_boxes[:, 3]-total_boxes[:, 1]+1)\n        patchw = np.round(patchw*0.25)\n\n        # make it even\n        patchw[np.where(np.mod(patchw,2) == 1)] += 1\n\n        input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)\n        for i in range(5):\n            x, y = points[:, i], points[:, i+5]\n            x, y = np.round(x-0.5*patchw), np.round(y-0.5*patchw)\n            [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(np.vstack([x, y, x+patchw-1, y+patchw-1]).T,\n                                                                    width,\n                                                                    height)\n            for j in range(num_box):\n                tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)\n                tmpim[dy[j]:edy[j]+1, dx[j]:edx[j]+1, :] = img[y[j]:ey[j]+1, x[j]:ex[j]+1, :]\n                input_buf[j, i*3:i*3+3, :, :] = adjust_input(cv2.resize(tmpim, (24, 24)))\n\n        output = self.LNet.predict(input_buf)\n\n        pointx = np.zeros((num_box, 5))\n        pointy = np.zeros((num_box, 5))\n\n        for k in range(5):\n            # do not make a large movement\n            tmp_index = np.where(np.abs(output[k]-0.5) > 0.35)\n            output[k][tmp_index[0]] = 0.5\n\n            pointx[:, k] = np.round(points[:, k] - 0.5*patchw) + output[k][:, 0]*patchw\n            pointy[:, k] = np.round(points[:, k+5] - 0.5*patchw) + output[k][:, 1]*patchw\n\n        points = np.hstack([pointx, pointy])\n        points = points.astype(np.int32)\n\n        return total_boxes, points\n\n\n\n    def list2colmatrix(self, pts_list):\n        \"\"\"\n            convert list to column matrix\n        Parameters:\n        ----------\n            pts_list:\n                input list of (x, y) points\n        Returns:\n        -------\n            colMat: 2n x 1 column matrix of the flattened coordinates\n\n        \"\"\"\n        assert len(pts_list) > 0\n        colMat = []\n        for i in range(len(pts_list)):\n            colMat.append(pts_list[i][0])\n            colMat.append(pts_list[i][1])\n        colMat = np.matrix(colMat).transpose()\n        return colMat\n\n    def find_tfrom_between_shapes(self, from_shape, to_shape):\n        \"\"\"\n            find the similarity transform between two shapes\n        Parameters:\n        ----------\n            from_shape: 2n x 1 column matrix of source points\n            to_shape: 2n x 1 column matrix of target points\n        Returns:\n        -------\n            tran_m: 2 x 2 scaled rotation matrix\n            tran_b: 2 x 1 translation vector\n        \"\"\"\n        assert from_shape.shape[0] == to_shape.shape[0] and from_shape.shape[0] % 2 == 0\n\n        sigma_from = 0.0\n        sigma_to = 0.0\n        cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])\n\n        # compute the mean and cov\n        # integer division keeps reshape happy under Python 3\n        from_shape_points = from_shape.reshape(from_shape.shape[0] // 2, 2)\n        to_shape_points = to_shape.reshape(to_shape.shape[0] // 2, 2)\n        mean_from = from_shape_points.mean(axis=0)\n        mean_to = to_shape_points.mean(axis=0)\n\n        for i in range(from_shape_points.shape[0]):\n            temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)\n            sigma_from += temp_dis * temp_dis\n            temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)\n            sigma_to += temp_dis * temp_dis\n            cov += (to_shape_points[i].transpose() - mean_to.transpose()) * (from_shape_points[i] - mean_from)\n\n        sigma_from = sigma_from / to_shape_points.shape[0]\n        sigma_to = sigma_to / to_shape_points.shape[0]\n        cov = cov / to_shape_points.shape[0]\n\n        # compute the affine matrix\n        s = np.matrix([[1.0, 0.0], [0.0, 1.0]])\n        u, d, vt = np.linalg.svd(cov)\n\n        if np.linalg.det(cov) < 0:\n            if d[1] < d[0]:\n                s[1, 1] = -1\n            else:\n                s[0, 0] = -1\n        r = u * s * vt\n        c = 1.0\n        if sigma_from != 0:\n            c = 1.0 / sigma_from * np.trace(np.diag(d) * s)\n\n        tran_b = mean_to.transpose() - c * r * mean_from.transpose()\n        tran_m = c * r\n\n        return tran_m, tran_b\n\n    def extract_image_chips(self, img, points, desired_size=256, padding=0):\n        \"\"\"\n            crop and align face\n        Parameters:\n        ----------\n            img: numpy array, BGR image of shape (h, w, 3)\n                input image\n            points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ..y5)\n            desired_size: default 256\n            padding: default 0\n        Returns:\n        -------\n            crop_imgs: list, n\n                cropped and aligned faces\n        \"\"\"\n        crop_imgs = []\n        for p in points:\n            shape = []\n            # integer division: p holds 5 x-coordinates then 5 y-coordinates\n            for k in range(len(p) // 2):\n                shape.append(p[k])\n                shape.append(p[k+5])\n\n            # clamp negative padding to zero\n            padding = max(padding, 0)\n            # average positions of face points\n            mean_face_shape_x = [0.224152, 0.75610125, 0.490127, 0.254149, 0.726104]\n            mean_face_shape_y = [0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233]\n\n            from_points = []\n            to_points = []\n\n            for i in range(len(shape) // 2):\n                x = (padding + mean_face_shape_x[i]) / (2 * padding + 1) * desired_size\n                y = (padding + mean_face_shape_y[i]) / (2 * padding + 1) * desired_size\n                to_points.append([x, y])\n                from_points.append([shape[2*i], shape[2*i+1]])\n\n            # convert the points to Mat\n            from_mat = self.list2colmatrix(from_points)\n            to_mat = self.list2colmatrix(to_points)\n\n            # compute the similarity transform\n            tran_m, tran_b = self.find_tfrom_between_shapes(from_mat, to_mat)\n\n            probe_vec = np.matrix([1.0, 0.0]).transpose()\n            probe_vec = tran_m * probe_vec\n\n            scale = np.linalg.norm(probe_vec)\n            angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0, 0])\n\n            from_center = [(shape[0]+shape[2])/2.0, (shape[1]+shape[3])/2.0]\n            to_center = [0, 0]\n            to_center[1] = desired_size * 0.4\n            to_center[0] = desired_size * 0.5\n\n            ex = to_center[0] - from_center[0]\n            ey = to_center[1] - from_center[1]\n\n            rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]), -1*angle, scale)\n            rot_mat[0][2] += ex\n            rot_mat[1][2] += ey\n\n            chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))\n            crop_imgs.append(chips)\n\n        return crop_imgs\n\n" ]
[ [ "numpy.vstack", "numpy.linalg.norm", "numpy.array", "numpy.zeros", "numpy.diag", "numpy.matrix", "numpy.linalg.det", "numpy.abs", "numpy.linalg.svd", "numpy.mod", "numpy.hstack", "numpy.expand_dims", "numpy.maximum", "numpy.where", "numpy.round" ] ]
braineniac/adapt_kalman
[ "a29a0740420959d2221d548a414031c17fa25cf8" ]
[ "scripts/experiments.py" ]
[ "#!/usr/bin/env python\n\n# Copyright (c) 2019 Daniel Hammer. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom kalman_estimator import KalmanFilter\nfrom kalman_estimator import SysIO, KalmanEstimator\nfrom kalman_estimator import StateEstimator, EstimationPlots\n\n\nclass Experiment(object):\n\n def __init__(self,\n sys_IO=None,\n kalman_filter=None,\n slice=(0, np.inf), legend=[]):\n if not isinstance(sys_IO, SysIO):\n raise ValueError(\"Passed bag_sys_IO not a BagSysIO!\")\n if not isinstance(kalman_filter, KalmanFilter):\n raise ValueError(\"Passed kalman_filter not a KalmanFilter!\")\n self._sys_IO = sys_IO\n self._kalman_filter = kalman_filter\n self._slice = slice\n self._legend = legend\n\n def _get_estimation(self):\n state_estimator = KalmanEstimator(self._kalman_filter)\n state_estimator.set_stamped_input(self._sys_IO.get_input())\n state_estimator.set_stamped_output(self._sys_IO.get_output())\n return state_estimator\n\n def get_estimation_plots(self):\n estimation = self._get_estimation()\n estimation_plotter = EstimationPlots(estimation,\n self._slice, self._legend)\n return estimation_plotter\n\n\nclass NoRotationExperiment(Experiment):\n def __init__(self,\n sys_io=None,\n kalman_filter=None,\n slice=(0, np.inf), legend=[]):\n super(NoRotationExperiment, self).__init__(sys_io,\n kalman_filter,\n slice, legend)\n\n def _get_estimation(self):\n state_estimator = KalmanEstimator(self._kalman_filter)\n state_estimator.set_stamped_input(self._sys_IO.get_input())\n state_estimator.set_stamped_output(self._sys_IO.get_output())\n state_estimator.set_u1y1_zero()\n return state_estimator\n\n\nclass SimExperiment(Experiment):\n\n def __init__(self,\n sim=None,\n slice=(0, np.inf), legend=[]):\n self._sim = sim\n self._slice = slice\n self._legend = legend\n\n def _get_estimation(self):\n state_estimator = StateEstimator()\n state_estimator.set_stamped_input(self._sim.get_input())\n state_estimator.set_stamped_output(self._sim.get_output())\n state_estimator.set_stamped_states(self._sim.get_states())\n state_estimator.set_stamped_Q(self._sim.get_Q())\n return state_estimator\n\n\nclass ExperimentSuite(object):\n\n def __init__(self, name=\"\"):\n self._name = name\n self._experiments = []\n\n def plot(self):\n experiment_plotter = ExperimentPlotter(self._experiments)\n experiment_plotter.plot()\n\n def export(self):\n for i in range(len(self._experiments)):\n estimation_plots = self._experiments[i].get_estimation_plots()\n estimation_plots.export_input(self._name + str(i) + \"_\")\n estimation_plots.export_output(self._name + str(i) + \"_\")\n estimation_plots.export_states(self._name + str(i) + \"_\")\n estimation_plots.export_x0x1(self._name + str(i) + \"_\")\n estimation_plots.export_Q(self._name + str(i) + \"_\")\n\n\nclass ExperimentPlotter(object):\n figure = 1\n\n @staticmethod\n def add_figure():\n plt.figure(ExperimentPlotter.figure)\n ExperimentPlotter.figure += 1\n\n @staticmethod\n def 
_add_plot(stamped_plot=None, dimension=None, option=None, legend=None):\n if not stamped_plot:\n raise ValueError\n else:\n t, plot = stamped_plot\n if dimension is not None:\n plot = plot[dimension]\n if not option or not legend:\n plt.plot(t, plot)\n else:\n plt.plot(t, plot, option, label=legend)\n plt.legend()\n\n def __init__(self, experiments=None):\n if not isinstance(experiments, list):\n raise ValueError(\"Pass a list of Experiment!\")\n if not all(isinstance(exp, Experiment) for exp in experiments):\n raise ValueError(\"Pass a list only containing Experiment!\")\n self._experiments = experiments\n self._all_estimation_plots = []\n self._options = [\"b\", \"r\", \"k\", \"m\", \"g\"]\n\n def plot(self):\n for experiment in self._experiments:\n estimation_plots = experiment.get_estimation_plots()\n self._all_estimation_plots.append(estimation_plots)\n self._plot_input_figure()\n self._plot_output_figure()\n self._plot_states_figure()\n self._plot_xy_state_figure()\n self._plot_Q_figure()\n plt.show()\n\n def _plot_input_figure(self):\n self.add_figure()\n input_titles = self._all_estimation_plots[0].get_input_titles()\n for i in range(len(input_titles)):\n plt.subplot(len(input_titles) * 100 + 10 + 1 + i)\n plt.ylabel(input_titles[i])\n plt.xlabel(\"Time [s]\")\n for estimation_plots, option in \\\n zip(self._all_estimation_plots, self._options):\n legend = estimation_plots.get_legend()\n self._add_plot(estimation_plots.get_input_plot(),\n i,\n option,\n legend)\n\n def _plot_output_figure(self):\n self.add_figure()\n output_titles = self._all_estimation_plots[0].get_output_titles()\n for i in range(len(output_titles)):\n plt.subplot(len(output_titles) * 100 + 10 + 1 + i)\n plt.ylabel(output_titles[i])\n plt.xlabel(\"Time [s]\")\n for estimation_plots, option in \\\n zip(self._all_estimation_plots, self._options):\n legend = estimation_plots.get_legend()\n self._add_plot(estimation_plots.get_output_plot(),\n i,\n option,\n legend)\n\n def _plot_states_figure(self):\n self.add_figure()\n states_titles = self._all_estimation_plots[0].get_states_titles()\n for i in range(len(states_titles)):\n plt.subplot(len(states_titles) * 100 + 10 + 1 + i)\n plt.ylabel(states_titles[i])\n plt.xlabel(\"Time [s]\")\n for estimation_plots, option in \\\n zip(self._all_estimation_plots, self._options):\n legend = estimation_plots.get_legend()\n self._add_plot(estimation_plots.get_states_plot(),\n i,\n option,\n legend)\n\n def _plot_xy_state_figure(self):\n self.add_figure()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n for estimation_plots, option in \\\n zip(self._all_estimation_plots, self._options):\n legend = estimation_plots.get_legend()\n self._add_plot(estimation_plots.get_x0x1_plot(),\n None,\n option,\n legend)\n\n def _plot_Q_figure(self):\n self.add_figure()\n Q_titles = self._all_estimation_plots[0].get_Q_titles()\n for i in range(len(Q_titles)):\n plt.subplot(len(Q_titles) * 100 + 10 + 1 + i)\n plt.xlabel(\"Time [s]\")\n plt.ylabel(Q_titles[i])\n for estimation_plots, option in \\\n zip(self._all_estimation_plots, self._options):\n legend = estimation_plots.get_legend()\n self._add_plot(estimation_plots.get_Q_plot(),\n i,\n option,\n legend)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
gbmarc1/Ax
[ "9428fa64a621cf4562c7e2c63881a0ca2fa2780b" ]
[ "setup.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport subprocess\n\nimport numpy\nfrom Cython.Build import cythonize\nfrom setuptools import find_packages, setup\nfrom setuptools.extension import Extension\n\n\nEXTENSIONS = [\n Extension(\n \"ax.utils.stats.sobol\",\n [\"ax/utils/stats/sobol.pyx\"],\n include_dirs=[numpy.get_include()],\n )\n]\n\nREQUIRES = [\n \"botorch>=0.1.3\",\n \"jinja2\", # also a Plotly dep\n \"pandas\",\n \"scipy\",\n \"sklearn\",\n \"plotly\",\n]\n\n# pytest-cov requires pytest >= 3.6\nDEV_REQUIRES = [\n \"beautifulsoup4\",\n \"black\",\n \"flake8\",\n \"pytest>=3.6\",\n \"pytest-cov\",\n \"sphinx\",\n \"sphinx-autodoc-typehints\",\n]\n\nMYSQL_REQUIRES = [\"SQLAlchemy>=1.1.13\"]\n\nNOTEBOOK_REQUIRES = [\"jupyter\"]\n\n\ndef get_git_version(abbreviate: bool = False) -> str:\n \"\"\"Gets the latest Git tag (as a string), e.g. 0.1.2.\n\n Note that `git describe --tags` works as follows:\n - Finds the most recent tag that is reachable from a commit.\n - If the tag points to the commit, then only the tag is shown.\n - Otherwise, it suffixes the tag name with the number of additional commits\n on top of the tag, and the abbreviated name of the most recent commit,\n e.g. 0.1.2-9-g2118b21. If you add `--abbrev=0`, this suffix is removed.\n This behavior is controlled by the `abbrev` parameter.\n \"\"\"\n cmd = [\"git\", \"describe\", \"--tags\"]\n if abbreviate:\n cmd.append(\"--abbrev=0\")\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n return out.strip().decode(\"ascii\")\n except (subprocess.SubprocessError, OSError):\n return \"Unknown\"\n\n\ndef write_version_py(version: str) -> None:\n \"\"\"Write the current package version to a Python file (ax/version.py)\n\n This file will be imported by ax/__init__.py, so that users can determine\n the current version by running `from ax import __version__`.\n \"\"\"\n content = \"\"\"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n# THIS FILE IS GENERATED FROM AX SETUP.PY\n\nversion = \"%s\"\n\"\"\"\n f = open(\"ax/version.py\", \"w\")\n try:\n f.write(content % version)\n finally:\n f.close()\n return version\n\n\ndef setup_package() -> None:\n \"\"\"Used for installing the Ax package.\n\n First, we determine the current version by getting the latest tag from Git.\n We write this version to a file (ax/version.py), which is imported by\n __init__.py. We also pass this version to setuptools below.\n \"\"\"\n\n # Grab current version from Git\n # Abbreviated version (e.g. 0.1.2) will be used by setuptools\n # Unabbreviated version (e.g. 
0.1.2-9-g2118b21) will be used by __init__.py\n abbreviated_version = get_git_version(abbreviate=True)\n version = get_git_version(abbreviate=False)\n\n # Write unabbreviated version to version.py\n write_version_py(version)\n\n with open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n setup(\n name=\"ax-platform\",\n version=abbreviated_version,\n description=\"Adaptive Experimentation\",\n author=\"Facebook, Inc.\",\n license=\"MIT\",\n url=\"https://github.com/facebook/Ax\",\n keywords=[\"Experimentation\", \"Optimization\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3.6\",\n setup_requires=[\"cython\", \"numpy\"],\n install_requires=REQUIRES,\n packages=find_packages(),\n ext_modules=cythonize(EXTENSIONS),\n package_data={\n # include all js, css, and html files in the package\n \"\": [\"*.js\", \"*.css\", \"*.html\"]\n },\n extras_require={\n \"dev\": DEV_REQUIRES,\n \"mysql\": MYSQL_REQUIRES,\n \"notebook\": NOTEBOOK_REQUIRES,\n },\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n" ]
[ [ "numpy.get_include" ] ]
seralexger/fairlearn
[ "c3ee7b5a45eb3394fc1b8d17b991e3d970970c05" ]
[ "test/unit/metrics/test_metrics_engine_dicts.py" ]
[ "# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport pytest\nimport numpy as np\n\nimport fairlearn.metrics as metrics\n\n# ======================================================\n\ny_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\ny_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\nsf_binary = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]\n\nmetric_group_summary_results = {\n metrics.true_positive_rate_group_summary: {\n \"overall\": 0.75, \"by_group\": {0: 1, 1: 0.6}},\n metrics.true_negative_rate_group_summary: {\n \"overall\": 0.7, \"by_group\": {0: 0.66666667, 1: 0.75}},\n metrics.false_positive_rate_group_summary: {\n \"overall\": 0.3, \"by_group\": {0: 0.33333333, 1: 0.25}},\n metrics.false_negative_rate_group_summary: {\n \"overall\": 0.25, \"by_group\": {0: 0, 1: 0.4}},\n metrics._root_mean_squared_error_group_summary: {\n \"overall\": 0.52704628, \"by_group\": {0: 0.47140452, 1: 0.57735027}},\n metrics._balanced_root_mean_squared_error_group_summary: {\n \"overall\": 0.52386128, \"by_group\": {0: 0.28867513, 1: 0.56622777}},\n metrics.mean_prediction_group_summary: {\n \"overall\": 0.5, \"by_group\": {0: 0.55555556, 1: 0.44444444}},\n metrics.selection_rate_group_summary: {\n \"overall\": 0.5, \"by_group\": {0: 0.55555556, 1: 0.44444444}},\n metrics._mean_overprediction_group_summary: {\n \"overall\": 0.16666667, \"by_group\": {0: 0.22222222, 1: 0.11111111}},\n metrics._mean_underprediction_group_summary: {\n \"overall\": 0.11111111, \"by_group\": {0: -0, 1: 0.22222222}},\n metrics.accuracy_score_group_summary: {\n \"overall\": 0.72222222, \"by_group\": {0: 0.77777778, 1: 0.66666667}},\n metrics.balanced_accuracy_score_group_summary: {\n \"overall\": 0.725, \"by_group\": {0: 0.83333333, 1: 0.675}},\n metrics.confusion_matrix_group_summary: {\n 'overall': np.array([[7, 3], [2, 6]]),\n 'by_group': {0: np.array([[4, 2], [0, 3]]), 1: np.array([[3, 1], [2, 3]])}},\n metrics.precision_score_group_summary: {\n \"overall\": 0.66666667, \"by_group\": {0: 0.6, 1: 0.75}},\n metrics.recall_score_group_summary: {\n \"overall\": 0.75, \"by_group\": {0: 1, 1: 0.6}},\n metrics.roc_auc_score_group_summary: {\n \"overall\": 0.725, \"by_group\": {0: 0.83333333, 1: 0.675}},\n metrics.zero_one_loss_group_summary: {\n \"overall\": 0.27777778, \"by_group\": {0: 0.22222222, 1: 0.33333333}},\n metrics.mean_absolute_error_group_summary: {\n \"overall\": 0.27777778, \"by_group\": {0: 0.22222222, 1: 0.33333333}},\n metrics.mean_squared_error_group_summary: {\n \"overall\": 0.27777778, \"by_group\": {0: 0.22222222, 1: 0.33333333}},\n metrics.r2_score_group_summary: {\n \"overall\": -0.125, \"by_group\": {0: 0, 1: -0.35}},\n metrics.f1_score_group_summary: {\n \"overall\": 0.70588235, \"by_group\": {0: 0.75, 1: 0.66666666}},\n metrics.log_loss_group_summary: {\n \"overall\": 9.59423782, \"by_group\": {0: 7.67546133, 1: 11.51301430}},\n}\n\nderived_metric_results = {\n metrics.true_positive_rate_difference: 0.4,\n metrics.true_positive_rate_ratio: 0.6,\n metrics.true_negative_rate_difference: 0.083333333,\n metrics.true_negative_rate_ratio: 0.88888889,\n metrics.false_positive_rate_difference: 0.083333333,\n metrics.false_positive_rate_ratio: 0.75,\n metrics.false_negative_rate_difference: 0.4,\n metrics.false_negative_rate_ratio: 0,\n metrics.selection_rate_difference: 0.11111111,\n metrics.selection_rate_ratio: 0.8,\n metrics.accuracy_score_difference: 0.11111111,\n metrics.accuracy_score_ratio: 0.85714286,\n 
metrics.accuracy_score_group_min: 0.66666667,\n metrics.zero_one_loss_difference: 0.11111111,\n metrics.zero_one_loss_ratio: 0.66666667,\n metrics.zero_one_loss_group_max: 0.33333333,\n metrics.balanced_accuracy_score_group_min: 0.675,\n metrics.precision_score_group_min: 0.6,\n metrics.recall_score_group_min: 0.6,\n metrics.roc_auc_score_group_min: 0.675,\n metrics.mean_absolute_error_group_max: 0.33333333,\n metrics.mean_squared_error_group_max: 0.33333333,\n metrics.r2_score_group_min: -0.35,\n metrics.f1_score_group_max: 0.75,\n metrics.log_loss_group_min: 7.67546133,\n}\n\nmetric_group_summary_pos_label_keys = [\n metrics.true_positive_rate_group_summary,\n metrics.true_negative_rate_group_summary,\n metrics.false_positive_rate_group_summary,\n metrics.false_negative_rate_group_summary,\n metrics.selection_rate_group_summary,\n metrics.precision_score_group_summary,\n metrics.recall_score_group_summary,\n metrics.f1_score_group_summary,\n]\n\n\n# =======================================================\n\ndef test_dict_sizes():\n assert len(metrics._metric_group_summary_dict) == len(metric_group_summary_results)\n assert len(metrics._derived_metric_dict) == len(derived_metric_results)\n\n\[email protected](\"func\", metric_group_summary_results.keys())\ndef test_metric_group_summary_smoke(func):\n result = func(y_true, y_pred, sensitive_features=sf_binary)\n assert result.overall == pytest.approx(metric_group_summary_results[func][\"overall\"])\n assert len(result.by_group) == 2\n assert result.by_group[0] == pytest.approx(metric_group_summary_results[func][\"by_group\"][0])\n assert result.by_group[1] == pytest.approx(metric_group_summary_results[func][\"by_group\"][1])\n\n\[email protected](\"func\", derived_metric_results.keys())\ndef test_derived_metrics_smoke(func):\n result = func(y_true, y_pred, sensitive_features=sf_binary)\n assert result == pytest.approx(derived_metric_results[func])\n\n\[email protected](\"func\", metric_group_summary_pos_label_keys)\ndef test_metric_group_summary_pos_label_0(func):\n # We're going to set pos_label=0, so for simplicity invert the previous inputs\n y_true_invert = [1-y for y in y_true]\n y_pred_invert = [1-y for y in y_pred]\n result = func(y_true_invert, y_pred_invert, sensitive_features=sf_binary, pos_label=0)\n assert result.overall == pytest.approx(metric_group_summary_results[func][\"overall\"])\n assert len(result.by_group) == 2\n assert result.by_group[0] == pytest.approx(metric_group_summary_results[func][\"by_group\"][0])\n assert result.by_group[1] == pytest.approx(metric_group_summary_results[func][\"by_group\"][1])\n" ]
[ [ "numpy.array" ] ]
j35tor/federated
[ "d92bfa6b8e3c9ebbac51ff7a3a180c2baaa08730" ]
[ "tensorflow_federated/python/core/impl/tensorflow_context/tensorflow_serialization_test.py" ]
[ "# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import serialization_utils\nfrom tensorflow_federated.python.common_libs import test_utils\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.core.impl.context_stack import context_stack_impl\nfrom tensorflow_federated.python.core.impl.tensorflow_context import tensorflow_serialization\nfrom tensorflow_federated.python.core.impl.types import type_serialization\n\n\ndef _tf_computation_serializer(fn, parameter_type, context):\n serializer = tensorflow_serialization.tf_computation_serializer(\n parameter_type, context)\n arg_to_fn = next(serializer)\n result = fn(arg_to_fn)\n return serializer.send(result)\n\n\nclass TensorFlowSerializationTest(test_case.TestCase):\n\n def test_serialize_tensorflow_with_no_parameter(self):\n comp, extra_type_spec = _tf_computation_serializer(\n lambda _: tf.constant(99), None, context_stack_impl.context_stack)\n self.assertEqual(\n str(type_serialization.deserialize_type(comp.type)), '( -> int32)')\n self.assertEqual(str(extra_type_spec), '( -> int32)')\n self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')\n results = tf.compat.v1.Session().run(\n tf.import_graph_def(\n serialization_utils.unpack_graph_def(comp.tensorflow.graph_def),\n None, [comp.tensorflow.result.tensor.tensor_name]))\n self.assertEqual(results, [99])\n\n def test_serialize_tensorflow_with_table_no_variables(self):\n\n def table_lookup(word):\n table = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(['a', 'b', 'c'],\n np.arange(3, dtype=np.int64)),\n num_oov_buckets=1)\n return table.lookup(word)\n\n comp, extra_type_spec = _tf_computation_serializer(\n table_lookup,\n computation_types.TensorType(dtype=tf.string, shape=(None,)),\n context_stack_impl.context_stack)\n self.assertEqual(\n str(type_serialization.deserialize_type(comp.type)),\n '(string[?] -> int64[?])')\n self.assertEqual(str(extra_type_spec), '(string[?] 
-> int64[?])')\n self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')\n\n with tf.Graph().as_default() as g:\n tf.import_graph_def(\n serialization_utils.unpack_graph_def(comp.tensorflow.graph_def),\n name='')\n with tf.compat.v1.Session(graph=g) as sess:\n sess.run(fetches=comp.tensorflow.initialize_op)\n results = sess.run(\n fetches=comp.tensorflow.result.tensor.tensor_name,\n feed_dict={\n comp.tensorflow.parameter.tensor.tensor_name: ['b', 'c', 'a']\n })\n self.assertAllEqual(results, [1, 2, 0])\n\n @test_utils.graph_mode_test\n def test_serialize_tensorflow_with_simple_add_three_lambda(self):\n comp, extra_type_spec = _tf_computation_serializer(\n lambda x: x + 3, computation_types.TensorType(tf.int32),\n context_stack_impl.context_stack)\n self.assertEqual(\n str(type_serialization.deserialize_type(comp.type)), '(int32 -> int32)')\n self.assertEqual(str(extra_type_spec), '(int32 -> int32)')\n self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')\n parameter = tf.constant(1000)\n results = tf.compat.v1.Session().run(\n tf.import_graph_def(\n serialization_utils.unpack_graph_def(comp.tensorflow.graph_def),\n {comp.tensorflow.parameter.tensor.tensor_name: parameter},\n [comp.tensorflow.result.tensor.tensor_name]))\n self.assertEqual(results, [1003])\n\n @test_utils.graph_mode_test\n def test_serialize_tensorflow_with_structured_type_signature(self):\n batch_type = collections.namedtuple('BatchType', ['x', 'y'])\n output_type = collections.namedtuple('OutputType', ['A', 'B'])\n comp, extra_type_spec = _tf_computation_serializer(\n lambda z: output_type(2.0 * tf.cast(z.x, tf.float32), 3.0 * z.y),\n computation_types.StructWithPythonType([('x', tf.int32),\n ('y', (tf.float32, [2]))],\n batch_type),\n context_stack_impl.context_stack)\n self.assertEqual(\n str(type_serialization.deserialize_type(comp.type)),\n '(<x=int32,y=float32[2]> -> <A=float32,B=float32[2]>)')\n self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')\n self.assertEqual(\n str(extra_type_spec),\n '(<x=int32,y=float32[2]> -> <A=float32,B=float32[2]>)')\n self.assertIsInstance(extra_type_spec.parameter,\n computation_types.StructWithPythonType)\n self.assertIs(extra_type_spec.parameter.python_container, batch_type)\n self.assertIsInstance(extra_type_spec.result,\n computation_types.StructWithPythonType)\n self.assertIs(extra_type_spec.result.python_container, output_type)\n\n @test_utils.graph_mode_test\n def test_serialize_tensorflow_with_data_set_sum_lambda(self):\n\n def _legacy_dataset_reducer_example(ds):\n return ds.reduce(np.int64(0), lambda x, y: x + y)\n\n comp, extra_type_spec = _tf_computation_serializer(\n _legacy_dataset_reducer_example,\n computation_types.SequenceType(tf.int64),\n context_stack_impl.context_stack)\n self.assertEqual(\n str(type_serialization.deserialize_type(comp.type)),\n '(int64* -> int64)')\n self.assertEqual(str(extra_type_spec), '(int64* -> int64)')\n self.assertEqual(comp.WhichOneof('computation'), 'tensorflow')\n parameter = tf.data.Dataset.range(5)\n results = tf.compat.v1.Session().run(\n tf.import_graph_def(\n serialization_utils.unpack_graph_def(comp.tensorflow.graph_def), {\n comp.tensorflow.parameter.sequence.variant_tensor_name:\n tf.data.experimental.to_variant(parameter)\n }, [comp.tensorflow.result.tensor.tensor_name]))\n self.assertEqual(results, [10])\n\n\nif __name__ == '__main__':\n test_case.main()\n" ]
[ [ "tensorflow.compat.v1.Session", "tensorflow.data.Dataset.range", "numpy.int64", "numpy.arange", "tensorflow.Graph", "tensorflow.data.experimental.to_variant", "tensorflow.cast", "tensorflow.constant" ] ]
alessiomora/federated
[ "3b501067ed7062aaec3cc8830aaec0a7cf8f0942", "3b501067ed7062aaec3cc8830aaec0a7cf8f0942" ]
[ "tensorflow_federated/python/simulation/datasets/file_per_user_client_data.py", "tensorflow_federated/python/learning/optimizers/optimizer_test.py" ]
[ "# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementations of `ClientData` backed by a file system.\"\"\"\n\nimport collections\nimport os.path\nfrom typing import Callable, Mapping\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.simulation.datasets import client_data\nfrom tensorflow_federated.python.tensorflow_libs import tensor_utils\n\n\nclass FilePerUserClientData(client_data.SerializableClientData):\n \"\"\"A `tff.simulation.datasets.ClientData` that maps a set of files to a dataset.\n\n This mapping is restricted to one file per user.\n \"\"\"\n\n def __init__(self, client_ids_to_files: Mapping[str, str],\n dataset_fn: Callable[[str], tf.data.Dataset]):\n \"\"\"Constructs a `tff.simulation.datasets.ClientData` object.\n\n Args:\n client_ids_to_files: A mapping from string client IDs to filepaths\n containing the user's data.\n dataset_fn: A factory function that takes a filepath (must accept both\n strings and tensors) and returns a `tf.data.Dataset` corresponding to\n this path.\n \"\"\"\n py_typecheck.check_type(client_ids_to_files, collections.abc.Mapping)\n if not client_ids_to_files:\n raise ValueError('`client_ids` must have at least one client ID')\n py_typecheck.check_callable(dataset_fn)\n self._client_ids = sorted(client_ids_to_files.keys())\n\n # Creates a dataset in a manner that can be serialized by TF.\n def serializable_dataset_fn(client_id: str) -> tf.data.Dataset:\n client_ids_to_path = tf.lookup.StaticHashTable(\n tf.lookup.KeyValueTensorInitializer(\n list(client_ids_to_files.keys()),\n list(client_ids_to_files.values())), '')\n client_path = client_ids_to_path.lookup(client_id)\n return dataset_fn(client_path)\n\n self._serializable_dataset_fn = serializable_dataset_fn\n\n tf_dataset = serializable_dataset_fn(tf.constant(self._client_ids[0]))\n self._element_type_structure = tf_dataset.element_spec\n\n @property\n def serializable_dataset_fn(self):\n \"\"\"Creates a `tf.data.Dataset` for a client in a TF-serializable manner.\"\"\"\n return self._serializable_dataset_fn\n\n @property\n def client_ids(self):\n return self._client_ids\n\n def create_tf_dataset_for_client(self, client_id: str) -> tf.data.Dataset:\n \"\"\"Creates a new `tf.data.Dataset` containing the client training examples.\n\n This function will create a dataset for a given client if `client_id` is\n contained in the `client_ids` property of the `FilePerUserClientData`.\n Unlike `self.serializable_dataset_fn`, this method is not serializable.\n\n Args:\n client_id: The string identifier for the desired client.\n\n Returns:\n A `tf.data.Dataset` object.\n \"\"\"\n if client_id not in self.client_ids:\n raise ValueError(\n 'ID [{i}] is not a client in this ClientData. 
See '\n 'property `client_ids` for the list of valid ids.'.format(\n i=client_id))\n\n client_dataset = self.serializable_dataset_fn(tf.constant(client_id))\n tensor_utils.check_nested_equal(client_dataset.element_spec,\n self._element_type_structure)\n return client_dataset\n\n @property\n def element_type_structure(self):\n return self._element_type_structure\n\n @classmethod\n def create_from_dir(cls, path, create_tf_dataset_fn=tf.data.TFRecordDataset):\n \"\"\"Builds a `tff.simulation.datasets.FilePerUserClientData`.\n\n Iterates over all files in `path`, using the filename as the client ID. Does\n not recursively search `path`.\n\n Args:\n path: A directory path to search for per-client files.\n create_tf_dataset_fn: A callable that creates a `tf.data.Dataset` object\n for a given file in the directory specified in `path`.\n\n Returns:\n A `tff.simulation.datasets.FilePerUserClientData` object.\n \"\"\"\n client_ids_to_paths_dict = {\n filename: os.path.join(path, filename)\n for filename in tf.io.gfile.listdir(path)\n }\n\n return FilePerUserClientData(client_ids_to_paths_dict, create_tf_dataset_fn)\n", "# Copyright 2021, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.learning.optimizers import optimizer\n\n\nclass OptimizerChecksTest(test_case.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('zero', 0.0),\n ('negative', -1.0),\n ('none', None),\n ('not_float', '0.1'),\n )\n def test_check_learning_rate_raises(self, lr):\n with self.assertRaises((ValueError, TypeError)):\n optimizer.check_learning_rate(lr)\n\n @parameterized.named_parameters(\n ('zero', 0.0),\n ('negative', -1.0),\n ('one', 1.0),\n ('large', 42.0),\n ('none', None),\n ('not_float', '0.1'),\n )\n def test_check_momentum_raises(self, momentum):\n with self.assertRaises((ValueError, TypeError)):\n optimizer.check_momentum(momentum)\n\n @parameterized.named_parameters(\n ('bad_shape', tf.zeros([2], tf.float32), tf.zeros([3], tf.float32)),\n ('bad_dtype', tf.zeros([2], tf.float32), tf.zeros([2], tf.float64)),\n ('bad_structure', [tf.zeros([2]), tf.zeros([3])\n ], [tf.zeros([2]), [tf.zeros([3])]]),\n )\n def test_check_weights_gradients_match(self, weights, gradients):\n with self.assertRaises(ValueError):\n optimizer.check_weights_gradients_match(weights, gradients)\n\n\nif __name__ == '__main__':\n test_case.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.io.gfile.listdir" ], [ "tensorflow.zeros" ] ]
keuntaeklee/pytorch-PPUU
[ "0ba8c953df9cdb1e9937e301ed3384ac6b66ea73" ]
[ "eval_fm.py" ]
[ "import argparse\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as fun\n\nimport utils\nfrom dataloader import DataLoader\n\nparser = argparse.ArgumentParser(fromfile_prefix_chars='@')\nparser.add_argument('-dataset', type=str, default='i80')\nparser.add_argument('-debug', action='store_true')\nparser.add_argument('-batch_size', type=int, default=4)\nparser.add_argument('-v', type=int, default=4)\nparser.add_argument('-display', type=int, default=0)\nparser.add_argument('-seed', type=int, default=9999)\nparser.add_argument('-lanes', type=int, default=8)\nparser.add_argument('-traffic_rate', type=int, default=15)\nparser.add_argument('-n_episodes', type=int, default=1)\nparser.add_argument('-ncond', type=int, default=20)\nparser.add_argument('-npred', type=int, default=200)\nparser.add_argument('-n_batches', type=int, default=200)\nparser.add_argument('-n_samples', type=int, default=10)\nparser.add_argument('-n_action_seq', type=int, default=5)\nparser.add_argument('-sampling', type=str, default='fp')\nparser.add_argument('-noise', type=float, default=0.0)\nparser.add_argument('-n_mixture', type=int, default=20)\nparser.add_argument('-graph_density', type=float, default=0.001)\nparser.add_argument('-model_dir', type=str, default='models/')\nM1 = 'model=fwd-cnn-layers=3-bsize=64-ncond=20-npred=20-lrt=0.0001-nfeature=256-dropout=0.1-gclip=5.0-' + \\\n 'warmstart=0-seed=1.step200000.model'\nM2 = 'model=fwd-cnn-vae-fp-layers=3-bsize=64-ncond=20-npred=20-lrt=0.0001-nfeature=256-dropout=0.1-nz=32-' + \\\n 'beta=1e-06-zdropout=0.0-gclip=5.0-warmstart=1-seed=1.step200000.model'\nM3 = 'model=fwd-cnn-vae-fp-layers=3-bsize=64-ncond=20-npred=20-lrt=0.0001-nfeature=256-dropout=0.1-nz=32-' + \\\n 'beta=1e-06-zdropout=0.5-gclip=5.0-warmstart=1-seed=1.step200000.model'\nM4 = 'model=fwd-cnn-ten3-layers=3-bsize=64-ncond=20-npred=20-lrt=0.0001-nfeature=256-nhidden=128-fgeom=1-' + \\\n 'zeroact=0-zmult=0-dropout=0.1-nz=32-beta=0.0-zdropout=0.5-gclip=5.0-warmstart=1-seed=1.step200000.model'\nparser.add_argument('-mfile', type=str, default=M3)\nparser.add_argument('-cuda', type=int, default=1)\nparser.add_argument('-save_video', type=int, default=1)\nopt = parser.parse_args()\n\nif 'zeroact=1' in opt.mfile:\n opt.zeroact = 1\nelse:\n opt.zeroact = 0\n\nrandom.seed(opt.seed)\nnp.random.seed(opt.seed)\ntorch.manual_seed(opt.seed)\n\nopt.save_video = (opt.save_video == 1)\nopt.eval_dir = opt.model_dir + f'eval/'\n\n\nprint(f'[loading {opt.model_dir + opt.mfile}]')\nmodel = torch.load(opt.model_dir + opt.mfile)\nif type(model) is dict: model = model['model']\nmodel = model.cuda()\nmodel.eval()\n# if opt.cuda == 1:\n # model.intype('gpu')\n\ndataloader = DataLoader(None, opt, opt.dataset)\n# model.opt.npred = opt.npred # instruct the model about how many predictions we want it to produce\nmodel.opt.alpha = 0\n\ndirname = f'{opt.eval_dir}{opt.mfile}-nbatches={opt.n_batches}-npred={opt.npred}-nsample={opt.n_samples}'\nif '-ten' in opt.mfile:\n dirname += f'-sampling={opt.sampling}'\n if opt.sampling == 'knn':\n dirname += f'-density={opt.graph_density}'\n elif opt.sampling == 'pdf':\n dirname += f'-nmixture={opt.n_mixture}'\n mfile_prior = f'{opt.model_dir}/{opt.mfile}-nfeature=128-lrt=0.0001-nmixture={opt.n_mixture}.prior'\n print(f'[loading prior model: {mfile_prior}]')\n model.prior = torch.load(mfile_prior).cuda()\n # load z vectors. 
Extract them if they are not already saved.\n pzfile = opt.model_dir + opt.mfile + '.pz'\n if os.path.isfile(pzfile):\n p_z = torch.load(pzfile)\n graph = torch.load(pzfile + '.graph')\n model.p_z = p_z\n model.knn_indx = graph.get('knn_indx')\n model.knn_dist = graph.get('knn_dist')\n model.opt.topz_sample = int(model.p_z.size(0) * opt.graph_density)\n else:\n model.compute_pz(dataloader, opt, 250)\n torch.save(model.p_z, pzfile)\n model.compute_z_graph()\n torch.save({'knn_dist': model.knn_dist, 'knn_indx': model.knn_indx}, pzfile + '.graph')\n print('[done]')\n\ndirname += '.eval'\nos.system('mkdir -p ' + dirname)\n\n# if opt.cuda == 1:\n# model.intype('gpu')\n\nloss_i = torch.zeros(opt.n_batches, opt.batch_size, opt.n_samples, opt.npred)\nloss_s = torch.zeros(opt.n_batches, opt.batch_size, opt.n_samples, opt.npred)\nloss_c = torch.zeros(opt.n_batches, opt.batch_size, opt.n_samples, opt.npred)\ntrue_costs = torch.zeros(opt.n_batches, opt.batch_size, opt.npred, 2)\npred_costs = torch.zeros(opt.n_batches, opt.batch_size, opt.n_samples, opt.npred, 2)\ntrue_states = torch.zeros(opt.n_batches, opt.batch_size, opt.npred, 4)\npred_states = torch.zeros(opt.n_batches, opt.batch_size, opt.n_samples, opt.npred, 4)\n\n\ndef compute_loss(targets, predictions, r=True):\n pred_images, pred_states, pred_costs = predictions\n target_images, target_states, target_costs = targets\n loss_i = fun.mse_loss(pred_images, target_images, reduce=r)\n loss_s = fun.mse_loss(pred_states, target_states, reduce=r)\n loss_c = fun.mse_loss(pred_costs.cuda(), target_costs.cuda(), reduce=r)\n return loss_i, loss_s, loss_c\n\n\ndataloader.random.seed(12345)\n\nfor i in range(opt.n_batches):\n with torch.no_grad():\n torch.cuda.empty_cache()\n inputs, actions, targets, _, _ = dataloader.get_batch_fm('test', opt.npred)\n\n # save ground truth for the first 10 x batch_size samples\n if i < 10 and opt.save_video:\n for b in range(opt.batch_size):\n dirname_movie = f'{dirname}/videos/x{i * opt.batch_size + b:d}/y/'\n print(f'[saving ground truth video: {dirname_movie}]')\n utils.save_movie(dirname_movie, targets[0][b], targets[1][b], targets[2][b])\n\n for s in range(opt.n_samples):\n print(f'[batch {i}, sample {s}]', end=\"\\r\")\n\n if opt.zeroact == 1:\n actions.data.zero_()\n\n pred, _ = model(inputs, actions, targets, sampling=opt.sampling) # return as many predictions as actions\n pred_states[i, :, s].copy_(pred[1])\n true_states[i].copy_(targets[1])\n\n if i < 10 and s < 20 and opt.save_video:\n for b in range(opt.batch_size):\n dirname_movie = f'{dirname}/videos/sampled_z/true_actions/x{i * opt.batch_size + b:d}/z{s:d}/'\n print(f'[saving video: {dirname_movie}]', end=\"\\r\")\n utils.save_movie(dirname_movie, pred[0][b], pred[1][b]) # , pred_[2][b])\n # ^ images ^ position and velocity\n\n # rotate actions across the batch: a_{t} -> a_{t + 1}\n actions_rot = actions[(torch.arange(opt.batch_size) - 1) % opt.batch_size]\n\n # also generate videos with different action sequences\n pred_rot, _ = model(inputs, actions_rot, targets, sampling=opt.sampling)\n if i < 10 and s < 20 and opt.save_video:\n for b in range(opt.batch_size):\n dirname_movie = f'{dirname}/videos/sampled_z/rot_actions/x{i * opt.batch_size + b:d}/z{s:d}/'\n print('[saving video: {}]'.format(dirname_movie), end=\"\\r\")\n utils.save_movie(dirname_movie, pred_rot[0][b], pred_rot[1][b]) # , pred_perm[2][b])\n\n # also generate videos with true z vectors\n if s == 0:\n pred_true_z, _ = model(inputs, actions, targets)\n for b in range(opt.batch_size):\n dirname_movie = 
f'{dirname}/videos/true_z/true_actions/x{i * opt.batch_size + b:d}/z{s:d}/'\n print('[saving video: {}]'.format(dirname_movie), end=\"\\r\")\n utils.save_movie(dirname_movie, pred_true_z[0][b], pred_true_z[1][b]) # , pred_true_z[2][b])\n\n pred_true_z_rot, _ = model(inputs, actions_rot, targets)\n for b in range(opt.batch_size):\n dirname_movie = f'{dirname}/videos/true_z/rot_actions/x{i * opt.batch_size + b:d}/z{s:d}/'\n print('[saving video: {}]'.format(dirname_movie), end=\"\\r\")\n utils.save_movie(dirname_movie, pred_true_z_rot[0][b], pred_true_z_rot[1][b])\n # , pred_true_z_perm[2][b])\n\n # del inputs, actions, targets, pred\n\ntorch.save({'loss_i': loss_i,\n 'loss_s': loss_s,\n 'loss_c': loss_c,\n 'true_costs': true_costs,\n 'pred_costs': pred_costs,\n 'true_states': true_states,\n 'pred_states': pred_states},\n f'{dirname}/loss.pth')\n\nos.system(f'tar -cvf {dirname}.tgz {dirname}')\n" ]
[ [ "torch.cuda.empty_cache", "torch.nn.functional.mse_loss", "torch.load", "torch.manual_seed", "torch.save", "numpy.random.seed", "torch.no_grad", "torch.arange", "torch.zeros" ] ]
nils-werner/librosa
[ "3d57352e6f5b6da151a2dd5d303af985797800aa" ]
[ "tests/test_features.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom __future__ import print_function\nimport warnings\nimport numpy as np\n\nimport pytest\n\nimport librosa\n\nfrom test_core import load, srand\n\n# Disable cache\nimport os\ntry:\n os.environ.pop('LIBROSA_CACHE_DIR')\nexcept KeyError:\n pass\n\n__EXAMPLE_FILE = os.path.join('tests', 'data', 'test1_22050.wav')\nwarnings.resetwarnings()\nwarnings.simplefilter('always')\nwarnings.filterwarnings('module', '.*', FutureWarning, 'scipy.*')\n\n\n# utils submodule\[email protected]('slope', np.linspace(-2, 2, num=6))\[email protected]('xin', [np.vstack([np.arange(100.0)] * 3)])\[email protected]('order', [1, pytest.mark.xfail(0)])\[email protected]('width, axis', [pytest.mark.xfail((-1, 0)),\n pytest.mark.xfail((-1, 1)),\n pytest.mark.xfail((0, 0)),\n pytest.mark.xfail((0, 1)),\n pytest.mark.xfail((1, 0)),\n pytest.mark.xfail((1, 1)),\n pytest.mark.xfail((2, 0)),\n pytest.mark.xfail((2, 1)),\n (3, 0), (3, 1),\n pytest.mark.xfail((4, 0)),\n pytest.mark.xfail((4, 1)),\n (5, 1), pytest.mark.xfail((5, 0)),\n pytest.mark.xfail((6, 0)),\n pytest.mark.xfail((6, 1)),\n pytest.mark.xfail((7, 0)), (7, 1)])\[email protected]('bias', [-10, 0, 10])\ndef test_delta(xin, width, slope, order, axis, bias):\n\n x = slope * xin + bias\n\n # Note: this test currently only checks first-order differences\n# if width < 3 or np.mod(width, 2) != 1 or width > x.shape[axis]:\n# pytest.raises(librosa.ParameterError)\n\n delta = librosa.feature.delta(x,\n width=width,\n order=order,\n axis=axis)\n\n # Check that trimming matches the expected shape\n assert x.shape == delta.shape\n\n # Once we're sufficiently far into the signal (ie beyond half_len)\n # (x + delta)[t] should approximate x[t+1] if x is actually linear\n slice_orig = [slice(None)] * x.ndim\n slice_out = [slice(None)] * delta.ndim\n slice_orig[axis] = slice(width//2 + 1, -width//2 + 1)\n slice_out[axis] = slice(width//2, -width//2)\n assert np.allclose((x + delta)[tuple(slice_out)], x[tuple(slice_orig)])\n\n\ndef test_stack_memory():\n\n def __test(n_steps, delay, data):\n data_stack = librosa.feature.stack_memory(data,\n n_steps=n_steps,\n delay=delay)\n\n # If we're one-dimensional, reshape for testing\n if data.ndim == 1:\n data = data.reshape((1, -1))\n\n d, t = data.shape\n\n assert data_stack.shape[0] == n_steps * d\n assert data_stack.shape[1] == t\n\n assert np.allclose(data_stack[0], data[0])\n\n for i in range(d):\n for step in range(1, n_steps):\n if delay > 0:\n assert np.allclose(data[i, :- step * delay],\n data_stack[step * d + i, step * delay:])\n else:\n assert np.allclose(data[i, -step * delay:],\n data_stack[step * d + i, :step * delay])\n\n srand()\n\n for ndim in [1, 2]:\n data = np.random.randn(* ([5] * ndim))\n\n for n_steps in [-1, 0, 1, 2, 3, 4]:\n for delay in [-4, -2, -1, 0, 1, 2, 4]:\n tf = __test\n if n_steps < 1:\n tf = pytest.mark.xfail(__test, raises=librosa.ParameterError)\n if delay == 0:\n tf = pytest.mark.xfail(__test, raises=librosa.ParameterError)\n yield tf, n_steps, delay, data\n\n\n# spectral submodule\ndef test_spectral_centroid_synthetic():\n\n k = 5\n\n def __test(S, freq, sr, n_fft):\n cent = librosa.feature.spectral_centroid(S=S, freq=freq)\n\n if freq is None:\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n\n assert np.allclose(cent, freq[k])\n\n srand()\n # construct a fake spectrogram\n sr = 22050\n n_fft = 1024\n S = np.zeros((1 + n_fft // 2, 10))\n\n S[k, :] = 1.0\n\n yield __test, S, None, sr, n_fft\n\n freq = librosa.fft_frequencies(sr=sr, 
n_fft=n_fft)\n yield __test, S, freq, sr, n_fft\n\n # And if we modify the frequencies\n freq *= 3\n yield __test, S, freq, sr, n_fft\n\n # Or if we make up random frequencies for each frame\n freq = np.random.randn(*S.shape)\n yield __test, S, freq, sr, n_fft\n\n\ndef test_spectral_centroid_errors():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(S):\n librosa.feature.spectral_centroid(S=S)\n\n S = - np.ones((513, 10))\n yield __test, S\n\n S = - np.ones((513, 10)) * 1.j\n yield __test, S\n\n\ndef test_spectral_centroid_empty():\n\n def __test(y, sr, S):\n cent = librosa.feature.spectral_centroid(y=y, sr=sr, S=S)\n assert not np.any(cent)\n\n sr = 22050\n y = np.zeros(3 * sr)\n yield __test, y, sr, None\n\n S = np.zeros((1025, 10))\n yield __test, None, sr, S\n\n\ndef test_spectral_bandwidth_synthetic():\n # This test ensures that a signal confined to a single frequency bin\n # always achieves 0 bandwidth\n k = 5\n\n def __test(S, freq, sr, n_fft, norm, p):\n bw = librosa.feature.spectral_bandwidth(S=S, freq=freq, norm=norm, p=p)\n\n assert not np.any(bw)\n\n srand()\n # construct a fake spectrogram\n sr = 22050\n n_fft = 1024\n S = np.zeros((1 + n_fft // 2, 10))\n S[k, :] = 1.0\n\n for norm in [False, True]:\n for p in [1, 2]:\n # With vanilla frequencies\n yield __test, S, None, sr, n_fft, norm, p\n\n # With explicit frequencies\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n yield __test, S, freq, sr, n_fft, norm, p\n\n # And if we modify the frequencies\n freq = 3 * librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n yield __test, S, freq, sr, n_fft, norm, p\n\n # Or if we make up random frequencies for each frame\n freq = np.random.randn(*S.shape)\n yield __test, S, freq, sr, n_fft, norm, p\n\n\ndef test_spectral_bandwidth_onecol():\n # This test checks for issue https://github.com/librosa/librosa/issues/552\n # failure when the spectrogram has a single column\n\n def __test(S, freq):\n bw = librosa.feature.spectral_bandwidth(S=S, freq=freq)\n\n assert bw.shape == (1, 1)\n\n k = 5\n\n srand()\n # construct a fake spectrogram\n sr = 22050\n n_fft = 1024\n S = np.zeros((1 + n_fft // 2, 1))\n S[k, :] = 1.0\n\n # With vanilla frequencies\n yield __test, S, None\n\n # With explicit frequencies\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n yield __test, S, freq\n\n # And if we modify the frequencies\n freq = 3 * librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n yield __test, S, freq\n\n # Or if we make up random frequencies for each frame\n freq = np.random.randn(*S.shape)\n yield __test, S, freq\n\n\ndef test_spectral_bandwidth_errors():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(S):\n librosa.feature.spectral_bandwidth(S=S)\n\n S = - np.ones((513, 10))\n yield __test, S\n\n S = - np.ones((513, 10)) * 1.j\n yield __test, S\n\n\ndef test_spectral_rolloff_synthetic():\n\n srand()\n\n sr = 22050\n n_fft = 2048\n\n def __test(S, freq, pct):\n\n rolloff = librosa.feature.spectral_rolloff(S=S, sr=sr, freq=freq,\n roll_percent=pct)\n\n if freq is None:\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n\n idx = np.floor(pct * freq.shape[0]).astype(int)\n assert np.allclose(rolloff, freq[idx])\n\n S = np.ones((1 + n_fft // 2, 10))\n\n for pct in [0.25, 0.5, 0.95]:\n # Implicit frequencies\n yield __test, S, None, pct\n\n # Explicit frequencies\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n yield __test, S, freq, pct\n\n # And time-varying frequencies\n freq = np.cumsum(np.abs(np.random.randn(*S.shape)), axis=0)\n yield __test, S, freq, 
pct\n\n\ndef test_spectral_rolloff_errors():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(S, p):\n librosa.feature.spectral_rolloff(S=S, roll_percent=p)\n\n S = - np.ones((513, 10))\n yield __test, S, 0.95\n\n S = - np.ones((513, 10)) * 1.j\n yield __test, S, 0.95\n\n S = np.ones((513, 10))\n yield __test, S, -1\n\n S = np.ones((513, 10))\n yield __test, S, 2\n\n\ndef test_spectral_contrast_log():\n # We already have a regression test for linear energy difference\n # This test just does a sanity-check on the log-scaled version\n\n y, sr = librosa.load(__EXAMPLE_FILE)\n\n contrast = librosa.feature.spectral_contrast(y=y, sr=sr, linear=False)\n\n assert not np.any(contrast < 0)\n\n\ndef test_spectral_contrast_errors():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(S, freq, fmin, n_bands, quantile):\n librosa.feature.spectral_contrast(S=S,\n freq=freq,\n fmin=fmin,\n n_bands=n_bands,\n quantile=quantile)\n\n S = np.ones((1025, 10))\n\n # ill-shaped frequency set: scalar\n yield __test, S, 0, 200, 6, 0.02\n\n # ill-shaped frequency set: wrong-length vector\n yield __test, S, np.zeros((S.shape[0]+1,)), 200, 6, 0.02\n\n # ill-shaped frequency set: matrix\n yield __test, S, np.zeros(S.shape), 200, 6, 0.02\n\n # negative fmin\n yield __test, S, None, -1, 6, 0.02\n\n # zero fmin\n yield __test, S, None, 0, 6, 0.02\n\n # negative n_bands\n yield __test, S, None, 200, -1, 0.02\n\n # bad quantile\n yield __test, S, None, 200, 6, -1\n\n # bad quantile\n yield __test, S, None, 200, 6, 2\n\n # bands exceed nyquist\n yield __test, S, None, 200, 7, 0.02\n\n\ndef test_spectral_flatness_synthetic():\n\n # to construct a spectrogram\n n_fft = 2048\n def __test(y, S, flatness_ref):\n flatness = librosa.feature.spectral_flatness(y=y,\n S=S,\n n_fft=2048,\n hop_length=512)\n assert np.allclose(flatness, flatness_ref)\n\n # comparison to a manual calculation result\n S = np.array([[1, 3], [2, 1], [1, 2]])\n flatness_ref = np.array([[0.7937005259, 0.7075558390]])\n yield __test, None, S, flatness_ref\n\n # ones\n S = np.ones((1 + n_fft // 2, 10))\n flatness_ones = np.ones((1, 10))\n yield __test, None, S, flatness_ones\n\n # zeros\n S = np.zeros((1 + n_fft // 2, 10))\n flatness_zeros = np.ones((1, 10))\n yield __test, None, S, flatness_zeros\n\n\ndef test_spectral_flatness_errors():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(S, amin):\n librosa.feature.spectral_flatness(S=S,\n amin=amin)\n\n S = np.ones((1025, 10))\n\n # zero amin\n yield __test, S, 0\n\n # negative amin\n yield __test, S, -1\n\n\ndef test_rmse():\n\n def __test(n):\n S = np.ones((n, 5))\n\n # RMSE of an all-ones band is 1\n rmse = librosa.feature.rmse(S=S)\n\n assert np.allclose(rmse, np.ones_like(rmse))\n\n def __test_consistency(frame_length, hop_length, center):\n y, sr = librosa.load(__EXAMPLE_FILE, sr=None)\n\n # Ensure audio is divisible into frame size.\n y = librosa.util.fix_length(y, y.size - y.size % frame_length)\n assert y.size % frame_length == 0\n\n # STFT magnitudes with a constant windowing function and no centering.\n S = librosa.magphase(librosa.stft(y,\n n_fft=frame_length,\n hop_length=hop_length,\n window=np.ones,\n center=center))[0]\n\n # Try both RMS methods.\n rms1 = librosa.feature.rmse(S=S, frame_length=frame_length,\n hop_length=hop_length)\n rms2 = librosa.feature.rmse(y=y, frame_length=frame_length,\n hop_length=hop_length, center=center)\n\n assert rms1.shape == rms2.shape\n # Normalize envelopes.\n rms1 /= rms1.max()\n rms2 /= rms2.max()\n\n # 
Ensure results are similar.\n np.testing.assert_allclose(rms1, rms2, rtol=5e-2)\n\n for frame_length in [2048, 4096]:\n for hop_length in [128, 512, 1024]:\n for center in [False, True]:\n yield __test_consistency, frame_length, hop_length, center\n\n for n in range(10, 100, 10):\n yield __test, n\n\n\ndef test_zcr_synthetic():\n\n def __test_zcr(rate, y, frame_length, hop_length, center):\n zcr = librosa.feature.zero_crossing_rate(y,\n frame_length=frame_length,\n hop_length=hop_length,\n center=center)\n\n # We don't care too much about the edges if there's padding\n if center:\n zcr = zcr[:, frame_length//2:-frame_length//2]\n\n # We'll allow 1% relative error\n assert np.allclose(zcr, rate, rtol=1e-2)\n\n sr = 16384\n for period in [32, 16, 8, 4, 2]:\n y = np.ones(sr)\n y[::period] = -1\n # Every sign flip induces two crossings\n rate = 2./period\n # 1+2**k so that we get both sides of the last crossing\n for frame_length in [513, 2049]:\n for hop_length in [128, 256]:\n for center in [False, True]:\n yield __test_zcr, rate, y, frame_length, hop_length, center\n\n\ndef test_poly_features_synthetic():\n\n srand()\n sr = 22050\n n_fft = 2048\n\n def __test(S, coeffs, freq):\n\n order = coeffs.shape[0] - 1\n p = librosa.feature.poly_features(S=S, sr=sr, n_fft=n_fft,\n order=order, freq=freq)\n\n for i in range(S.shape[-1]):\n assert np.allclose(coeffs, p[::-1, i].squeeze())\n\n def __make_data(coeffs, freq):\n S = np.zeros_like(freq)\n for i, c in enumerate(coeffs):\n S = S + c * freq**i\n\n S = S.reshape((freq.shape[0], -1))\n return S\n\n for order in range(1, 3):\n freq = librosa.fft_frequencies(sr=sr, n_fft=n_fft)\n coeffs = np.atleast_1d(np.arange(1, 1+order))\n\n # First test: vanilla\n S = __make_data(coeffs, freq)\n yield __test, S, coeffs, None\n\n # And with explicit frequencies\n yield __test, S, coeffs, freq\n\n # And with alternate frequencies\n freq = freq**2.0\n S = __make_data(coeffs, freq)\n yield __test, S, coeffs, freq\n\n # And multi-dimensional\n freq = np.cumsum(np.abs(np.random.randn(1 + n_fft//2, 2)), axis=0)\n S = __make_data(coeffs, freq)\n yield __test, S, coeffs, freq\n\n\ndef test_tonnetz():\n y, sr = librosa.load(librosa.util.example_audio_file())\n tonnetz_chroma = np.load(os.path.join('tests', \"data\", \"feature-tonnetz-chroma.npy\"))\n tonnetz_msaf = np.load(os.path.join('tests', \"data\", \"feature-tonnetz-msaf.npy\"))\n\n # Use cqt chroma\n def __audio():\n tonnetz = librosa.feature.tonnetz(y=y, sr=sr)\n assert tonnetz.shape[0] == 6\n\n # Use pre-computed chroma\n def __stft():\n tonnetz = librosa.feature.tonnetz(chroma=tonnetz_chroma)\n assert tonnetz.shape[1] == tonnetz_chroma.shape[1]\n assert tonnetz.shape[0] == 6\n assert np.allclose(tonnetz_msaf, tonnetz)\n\n def __cqt():\n # Use high resolution cqt chroma\n chroma_cqt = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma=24)\n tonnetz = librosa.feature.tonnetz(chroma=chroma_cqt)\n assert tonnetz.shape[1] == chroma_cqt.shape[1]\n assert tonnetz.shape[0] == 6\n # Using stft chroma won't generally match cqt chroma\n # skip the equivalence check\n\n # Call the function with not enough parameters\n yield pytest.mark.xfail(librosa.feature.tonnetz, raises=librosa.ParameterError)\n yield __audio\n yield __stft\n yield __cqt\n\n\ndef test_tempogram_fail():\n\n @pytest.mark.xfail(raises=librosa.ParameterError)\n def __test(y, sr, onset_envelope, hop_length, win_length, center, window, norm):\n\n librosa.feature.tempogram(y=y,\n sr=sr,\n onset_envelope=onset_envelope,\n hop_length=hop_length,\n 
win_length=win_length,\n center=center,\n window=window,\n norm=norm)\n\n sr = 22050\n hop_length = 512\n duration = 10\n\n y = np.zeros(duration * sr)\n\n # Fail when no input is provided\n yield __test, None, sr, None, hop_length, 384, True, 'hann', np.inf\n\n # Fail when win_length is too small\n for win_length in [-384, -1, 0]:\n yield __test, y, sr, None, hop_length, win_length, True, 'hann', np.inf\n\n # Fail when len(window) != win_length\n yield __test, y, sr, None, hop_length, 384, True, np.ones(win_length + 1), np.inf\n\n\ndef test_tempogram_audio():\n\n def __test(y, sr, oenv, hop_length):\n\n # Get the tempogram from audio\n t1 = librosa.feature.tempogram(y=y, sr=sr,\n onset_envelope=None,\n hop_length=hop_length)\n\n # Get the tempogram from oenv\n t2 = librosa.feature.tempogram(y=None, sr=sr,\n onset_envelope=oenv,\n hop_length=hop_length)\n\n # Make sure it works when both are provided\n t3 = librosa.feature.tempogram(y=y, sr=sr,\n onset_envelope=oenv,\n hop_length=hop_length)\n\n # And that oenv overrides y\n t4 = librosa.feature.tempogram(y=0 * y, sr=sr,\n onset_envelope=oenv,\n hop_length=hop_length)\n\n assert np.allclose(t1, t2)\n assert np.allclose(t1, t3)\n assert np.allclose(t1, t4)\n\n y, sr = librosa.load(__EXAMPLE_FILE)\n\n for hop_length in [512, 1024]:\n oenv = librosa.onset.onset_strength(y=y,\n sr=sr,\n hop_length=hop_length)\n\n yield __test, y, sr, oenv, hop_length\n\n\ndef test_tempogram_odf():\n\n sr = 22050\n hop_length = 512\n duration = 8\n\n def __test_equiv(tempo, center):\n odf = np.zeros(duration * sr // hop_length)\n spacing = sr * 60. // (hop_length * tempo)\n odf[::int(spacing)] = 1\n\n odf_ac = librosa.autocorrelate(odf)\n\n tempogram = librosa.feature.tempogram(onset_envelope=odf,\n sr=sr,\n hop_length=hop_length,\n win_length=len(odf),\n window=np.ones,\n center=center,\n norm=None)\n\n idx = 0\n if center:\n idx = len(odf)//2\n\n assert np.allclose(odf_ac, tempogram[:, idx])\n\n # Generate a synthetic onset envelope\n def __test_peaks(tempo, win_length, window, norm):\n # Generate an evenly-spaced pulse train\n odf = np.zeros(duration * sr // hop_length)\n spacing = sr * 60. // (hop_length * tempo)\n odf[::int(spacing)] = 1\n\n tempogram = librosa.feature.tempogram(onset_envelope=odf,\n sr=sr,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n norm=norm)\n\n # Check the shape of the output\n assert tempogram.shape[0] == win_length\n\n assert tempogram.shape[1] == len(odf)\n\n # Mean over time to wash over the boundary padding effects\n idx = np.where(librosa.util.localmax(tempogram.max(axis=1)))[0]\n\n # Indices should all be non-zero integer multiples of spacing\n assert np.allclose(idx, spacing * np.arange(1, 1 + len(idx)))\n\n for tempo in [60, 90, 120, 160, 200]:\n for center in [False, True]:\n yield __test_equiv, tempo, center\n\n for win_length in [192, 384]:\n for window in ['hann', np.ones, np.ones(win_length)]:\n for norm in [None, 1, 2, np.inf]:\n yield __test_peaks, tempo, win_length, window, norm\n\n\ndef test_tempogram_odf_multi():\n\n sr = 22050\n hop_length = 512\n duration = 8\n\n # Generate a synthetic onset envelope\n def __test(center, win_length, window, norm):\n # Generate an evenly-spaced pulse train\n odf = np.zeros((10, duration * sr // hop_length))\n for i in range(10):\n spacing = sr * 60. 
// (hop_length * (60 + 12 * i))\n odf[i, ::int(spacing)] = 1\n\n tempogram = librosa.feature.tempogram(onset_envelope=odf,\n sr=sr,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n norm=norm)\n\n for i in range(10):\n tg_local = librosa.feature.tempogram(onset_envelope=odf[i],\n sr=sr,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n norm=norm)\n\n assert np.allclose(tempogram[i], tg_local)\n\n for center in [False, True]:\n for win_length in [192, 384]:\n for window in ['hann', np.ones, np.ones(win_length)]:\n for norm in [None, 1, 2, np.inf]:\n yield __test, center, win_length, window, norm\n\n\ndef test_cens():\n # load CQT data from Chroma Toolbox\n ct_cqt = load(os.path.join('tests', 'data', 'features-CT-cqt.mat'))\n\n fn_ct_chroma_cens = ['features-CT-CENS_9-2.mat',\n 'features-CT-CENS_21-5.mat',\n 'features-CT-CENS_41-1.mat']\n\n cens_params = [(9, 2), (21, 5), (41, 1)]\n\n for cur_test_case, cur_fn_ct_chroma_cens in enumerate(fn_ct_chroma_cens):\n win_len_smooth = cens_params[cur_test_case][0]\n downsample_smooth = cens_params[cur_test_case][1]\n\n # plug into librosa cens computation\n lr_chroma_cens = librosa.feature.chroma_cens(C=ct_cqt['f_cqt'],\n win_len_smooth=win_len_smooth,\n fmin=librosa.core.midi_to_hz(1),\n bins_per_octave=12,\n n_octaves=10)\n\n # leaving out frames to match chroma toolbox behaviour\n # lr_chroma_cens = librosa.resample(lr_chroma_cens, orig_sr=1, target_sr=1/downsample_smooth)\n lr_chroma_cens = lr_chroma_cens[:, ::downsample_smooth]\n\n # load CENS-41-1 features\n ct_chroma_cens = load(os.path.join('tests', 'data', cur_fn_ct_chroma_cens))\n\n maxdev = np.abs(ct_chroma_cens['f_CENS'] - lr_chroma_cens)\n assert np.allclose(ct_chroma_cens['f_CENS'], lr_chroma_cens, rtol=1e-15, atol=1e-15), maxdev\n\n\ndef test_mfcc():\n\n def __test(dct_type, norm, n_mfcc, S):\n\n E_total = np.sum(S, axis=0)\n\n mfcc = librosa.feature.mfcc(S=S, dct_type=dct_type, norm=norm, n_mfcc=n_mfcc)\n\n assert mfcc.shape[0] == n_mfcc\n assert mfcc.shape[1] == S.shape[1]\n\n # In type-2 mode, DC component should be constant over all frames\n if dct_type == 2:\n assert np.var(mfcc[0] / E_total) <= 1e-30\n\n S = librosa.power_to_db(np.random.randn(128, 100)**2, ref=np.max)\n\n for n_mfcc in [13, 20]:\n for dct_type in [1, 2, 3]:\n for norm in [None, 'ortho']:\n if dct_type == 1 and norm == 'ortho':\n tf = pytest.mark.xfail(__test, raises=NotImplementedError)\n else:\n tf = __test\n yield tf, dct_type, norm, n_mfcc, S\n" ]
[ [ "numpy.ones", "numpy.allclose", "numpy.zeros_like", "numpy.sum", "numpy.zeros", "numpy.var", "numpy.any", "numpy.random.randn", "numpy.abs", "numpy.ones_like", "numpy.floor", "numpy.arange", "numpy.testing.assert_allclose", "numpy.array", "numpy.linspace" ] ]
oliviermailletglas/project_csi4103
[ "b426f4fb8d909909dd7d1d954fd10d97891c33cb" ]
[ "brachiograph.py" ]
[ "# coding=utf-8\n\nfrom time import sleep\nimport readchar\nimport math\nimport numpy\nimport json\nimport pigpio\nfrom turtle_draw import BrachioGraphTurtle\n\n\ntry:\n pigpio.exceptions = False\n rpi = pigpio.pi()\n rpi.set_PWM_frequency(18, 50)\n pigpio.exceptions = True\n force_virtual = False\n\nexcept:\n print(\"pigpio daemon is not available; running in virtual mode\")\n force_virtual = True\n\n\nimport tqdm\n\n\nclass BrachioGraph:\n\n def __init__(\n self,\n\n # ----------------- geometry of the plotter -----------------\n\n inner_arm=8, # the lengths of the arms\n outer_arm=8,\n\n bounds=[-8, 4, 6, 13], # the maximum rectangular drawing area\n\n # ----------------- naive calculation values -----------------\n\n servo_1_parked_pw=1500, # pulse-widths when parked\n servo_2_parked_pw=1500,\n\n servo_1_degree_ms=-10, # milliseconds pulse-width per degree\n servo_2_degree_ms=10, # reversed for the mounting of the shoulder servo\n\n servo_1_parked_angle=-90, # the arm angle in the parked position\n servo_2_parked_angle=90,\n\n # ----------------- hysteresis -----------------\n\n hysteresis_correction_1=0, # hardware error compensation\n hysteresis_correction_2=0,\n\n # ----------------- servo angles and pulse-widths in lists -----------------\n\n servo_1_angle_pws=[], # pulse-widths for various angles\n servo_2_angle_pws=[],\n\n # ----------------- servo angles and pulse-widths in lists (bi-directional) ------\n\n servo_1_angle_pws_bidi = [], # bi-directional pulse-widths for various angles\n servo_2_angle_pws_bidi = [],\n\n # ----------------- the pen -----------------\n\n pw_up=1500, # pulse-widths for pen up/down\n pw_down=1100,\n\n # ----------------- misc -----------------\n\n wait=None, # default wait time between operations\n\n virtual = False, # run in virtual mode\n turtle = False\n ):\n\n # set the geometry\n self.inner_arm = inner_arm\n self.outer_arm = outer_arm\n\n self.virtual = virtual or force_virtual\n\n self.turtle = turtle\n if self.turtle:\n self.reset_turtle()\n\n # the box bounds describe a rectangle that we can safely draw in\n self.bounds = bounds\n\n # if pulse-widths to angles are supplied for each servo, we will feed them to\n # numpy.polyfit(), to produce a function for each one. 
Otherwise, we will use a simple\n # approximation based on a centre of travel of 1500µS and 10µS per degree\n\n self.servo_1_parked_pw = servo_1_parked_pw\n self.servo_1_degree_ms = servo_1_degree_ms\n self.servo_1_parked_angle = servo_1_parked_angle\n self.hysteresis_correction_1 = hysteresis_correction_1\n\n self.servo_2_parked_pw = servo_2_parked_pw\n self.servo_2_degree_ms = servo_2_degree_ms\n self.servo_2_parked_angle = servo_2_parked_angle\n self.hysteresis_correction_2 = hysteresis_correction_2\n\n # set some initial values required for moving methods\n self.previous_pw_1 = self.previous_pw_2 = 0\n self.active_hysteresis_correction_1 = self.active_hysteresis_correction_2 = 0\n self.reset_report()\n\n # Set the x and y position state, so it knows its current x/y position.\n self.x = -self.inner_arm\n self.y = self.outer_arm\n\n if servo_1_angle_pws_bidi:\n servo_1_angle_pws = []\n differences = []\n for angle, pws in servo_1_angle_pws_bidi.items():\n pw = (pws['acw'] + pws['cw']) / 2\n servo_1_angle_pws.append([angle, pw])\n differences.append((pws['acw'] - pws['cw']) / 2)\n self.hysteresis_correction_1 = numpy.mean(differences)\n\n if servo_1_angle_pws:\n servo_1_array = numpy.array(servo_1_angle_pws)\n self.angles_to_pw_1 = numpy.poly1d(\n numpy.polyfit(\n servo_1_array[:,0],\n servo_1_array[:,1],\n 3\n )\n )\n\n else:\n self.angles_to_pw_1 = self.naive_angles_to_pulse_widths_1\n\n if servo_2_angle_pws_bidi:\n servo_2_angle_pws = []\n differences = []\n for angle, pws in servo_2_angle_pws_bidi.items():\n pw = (pws['acw'] + pws['cw']) / 2\n servo_2_angle_pws.append([angle, pw])\n differences.append((pws['acw'] - pws['cw']) / 2)\n self.hysteresis_correction_2 = numpy.mean(differences)\n print(servo_2_angle_pws)\n\n if servo_2_angle_pws:\n servo_2_array = numpy.array(servo_2_angle_pws)\n self.angles_to_pw_2 = numpy.poly1d(\n numpy.polyfit(\n servo_2_array[:,0],\n servo_2_array[:,1],\n 3\n )\n )\n\n else:\n self.angles_to_pw_2 = self.naive_angles_to_pulse_widths_2\n\n\n # create the pen object\n self.pen = Pen(bg=self, pw_up=pw_up, pw_down=pw_down, virtual=self.virtual)\n\n if self.virtual:\n\n print(\"Initialising virtual BrachioGraph\")\n\n self.virtual_pw_1 = self.angles_to_pw_1(-90)\n self.virtual_pw_2 = self.angles_to_pw_2(90)\n\n # by default in virtual mode, we use a wait factor of 0 for speed\n self.wait = wait or 0\n\n else:\n\n # instantiate this Raspberry Pi as a pigpio.pi() instance\n self.rpi = pigpio.pi()\n\n # the pulse frequency should be no higher than 100Hz - higher values could (supposedly) damage the servos\n self.rpi.set_PWM_frequency(14, 50)\n self.rpi.set_PWM_frequency(15, 50)\n\n # by default we use a wait factor of 0.1 for accuracy\n self.wait = wait or .1\n\n self.set_angles(-90, 90)\n\n if self.turtle:\n self.turtle.showturtle()\n\n self.status()\n\n # methods in this class:\n # drawing\n # line-processing\n # test patterns\n # pen-moving\n # angles-to-pulse-widths\n # hardware-related\n # trigonometric\n # calibration\n # manual driving\n # reporting\n\n # ----------------- drawing methods -----------------\n\n def plot_file(self, filename=\"\", wait=0, interpolate=10, bounds=None):\n \"\"\"Passes the lines in the supplied JSON file to ``plot_lines()``\"\"\"\n\n wait = wait or self.wait\n bounds = bounds or self.bounds\n\n if not bounds:\n return \"File plotting is only possible when BrachioGraph.bounds is set.\"\n\n with open(filename, \"r\") as line_file:\n lines = json.load(line_file)\n\n self.plot_lines(lines=lines, wait=wait, interpolate=interpolate, 
bounds=bounds, flip=True)\n\n\n def plot_lines(self, lines=[], wait=0, interpolate=10, rotate=False, flip=False, bounds=None):\n \"\"\"Passes each segment of each line in lines to ``draw_line()``\"\"\"\n\n wait = wait or self.wait\n bounds = bounds or self.bounds\n\n if not bounds:\n return \"Line plotting is only possible when BrachioGraph.bounds is set.\"\n\n lines = self.rotate_and_scale_lines(lines=lines, bounds=bounds, flip=True)\n\n for line in tqdm.tqdm(lines, desc=\"Lines\", leave=False):\n x, y = line[0]\n\n # only if we are not within 1mm of the start of the line, lift pen and go there\n if (round(self.x, 1), round(self.y, 1)) != (round(x, 1), round(y, 1)):\n self.xy(x, y, wait=wait, interpolate=interpolate)\n\n for point in tqdm.tqdm(line[1:], desc=\"Segments\", leave=False):\n x, y = point\n self.xy(x, y, wait=wait, interpolate=interpolate, draw=True)\n\n self.park()\n\n\n def draw_line(self, start=(0, 0), end=(0, 0), wait=0, interpolate=10, both=False):\n \"\"\"Draws a straight line between two points\"\"\"\n\n wait = wait or self.wait\n\n start_x, start_y = start\n end_x, end_y = end\n\n self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate)\n\n self.xy(x=end_x, y=end_y, wait=wait, interpolate=interpolate, draw=True)\n\n if both:\n self.xy(x=start_x, y=start_y, wait=wait, interpolate=interpolate, draw=True)\n\n\n # ----------------- line-processing methods -----------------\n\n def rotate_and_scale_lines(self, lines=[], rotate=False, flip=False, bounds=None):\n\n rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider = self.analyse_lines(\n lines=lines, rotate=rotate, bounds=bounds\n )\n\n for line in lines:\n\n for point in line:\n if rotate:\n point[0], point[1] = point[1], point[0]\n\n x = point[0]\n x = x - x_mid_point # shift x values so that they have zero as their mid-point\n x = x / divider # scale x values to fit in our box width\n\n\n if flip ^ rotate: # flip before moving back into drawing pane\n x = -x\n\n x = x + box_x_mid_point # shift x values so that they have the box x mid-point as their mid-point\n\n\n y = point[1]\n y = y - y_mid_point\n y = y / divider\n y = y + box_y_mid_point\n\n point[0], point[1] = x, y\n\n return lines\n\n\n def analyse_lines(self, lines=[], rotate=False, bounds=None):\n\n # lines is a tuple itself containing a number of tuples, each of which contains a number of 2-tuples\n #\n # [ # |\n # [ # |\n # [3, 4], # | # |\n # [2, 4], # | # |\n # [1, 5], # a single point in a line # | a list of points defining a line # |\n # [3, 5], # | # |\n # [3, 7], # | # |\n # ], # |\n # [ # | all the lines\n # [...], # |\n # [...], # |\n # ], # |\n # [ # |\n # [...], # |\n # [...], # |\n # ], # |\n # ] # |\n\n # First, we create a pair of empty sets for all the x and y values in all of the lines of the plot data.\n\n x_values_in_lines = set()\n y_values_in_lines = set()\n\n # Loop over each line and all the points in each line, to get sets of all the x and y values:\n\n for line in lines:\n\n x_values_in_line, y_values_in_line = zip(*line)\n\n x_values_in_lines.update(x_values_in_line)\n y_values_in_lines.update(y_values_in_line)\n\n # Identify the minimum and maximum values.\n\n min_x, max_x = min(x_values_in_lines), max(x_values_in_lines)\n min_y, max_y = min(y_values_in_lines), max(y_values_in_lines)\n\n # Identify the range they span.\n\n x_range, y_range = max_x - min_x, max_y - min_y\n box_x_range, box_y_range = bounds[2] - bounds[0], bounds[3] - bounds[1]\n\n # And their mid-points.\n\n x_mid_point, y_mid_point = 
(max_x + min_x) / 2, (max_y + min_y) / 2\n box_x_mid_point, box_y_mid_point = (bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2\n\n # Get a 'divider' value for each range - the value by which we must divide all x and y so that they will\n # fit safely inside the drawing range of the plotter.\n\n # If both image and box are in portrait orientation, or both in landscape, we don't need to rotate the plot.\n\n if (x_range >= y_range and box_x_range >= box_y_range) or (x_range <= y_range and box_x_range <= box_y_range):\n\n divider = max((x_range / box_x_range), (y_range / box_y_range))\n rotate = False\n\n else:\n\n divider = max((x_range / box_y_range), (y_range / box_x_range))\n rotate = True\n x_mid_point, y_mid_point = y_mid_point, x_mid_point\n\n return rotate, x_mid_point, y_mid_point, box_x_mid_point, box_y_mid_point, divider\n\n\n # ----------------- test pattern methods -----------------\n\n def test_pattern(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):\n\n self.vertical_lines(\n bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both\n )\n self.horizontal_lines(\n bounds=bounds, lines=lines, wait=wait, interpolate=interpolate, repeat=repeat, reverse=reverse, both=both\n )\n\n\n def vertical_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):\n\n wait = wait or self.wait\n bounds = bounds or self.bounds\n\n if not bounds:\n return \"Plotting a test pattern is only possible when BrachioGraph.bounds is set.\"\n\n if not reverse:\n top_y = self.bounds[1]\n bottom_y = self.bounds[3]\n else:\n bottom_y = self.bounds[1]\n top_y = self.bounds[3]\n\n for n in range(repeat):\n step = (self.bounds[2] - self.bounds[0]) / lines\n x = self.bounds[0]\n while x <= self.bounds[2]:\n self.draw_line((x, top_y), (x, bottom_y), interpolate=interpolate, both=both)\n x = x + step\n\n self.park()\n\n\n def horizontal_lines(self, bounds=None, lines=4, wait=0, interpolate=10, repeat=1, reverse=False, both=False):\n\n wait = wait or self.wait\n bounds = bounds or self.bounds\n\n if not bounds:\n return \"Plotting a test pattern is only possible when BrachioGraph.bounds is set.\"\n\n if not reverse:\n min_x = self.bounds[0]\n max_x = self.bounds[2]\n else:\n max_x = self.bounds[0]\n min_x = self.bounds[2]\n\n for n in range(repeat):\n step = (self.bounds[3] - self.bounds[1]) / lines\n y = self.bounds[1]\n while y <= self.bounds[3]:\n self.draw_line((min_x, y), (max_x, y), interpolate=interpolate, both=both)\n y = y + step\n\n self.park()\n\n\n def box(self, bounds=None, wait=0, interpolate=10, repeat=1, reverse=False):\n \"\"\"Draw a box marked out by the ``bounds``.\"\"\"\n\n wait = wait or self.wait\n bounds = bounds or self.bounds\n\n if not bounds:\n return \"Box drawing is only possible when BrachioGraph.bounds is set.\"\n\n self.xy(bounds[0], bounds[1], wait, interpolate)\n\n for r in tqdm.tqdm(tqdm.trange(repeat), desc='Iteration', leave=False):\n\n if not reverse:\n\n self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)\n self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)\n self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)\n self.xy(bounds[0], bounds[1], wait, interpolate, draw=True)\n\n else:\n\n self.xy(bounds[0], bounds[3], wait, interpolate, draw=True)\n self.xy(bounds[2], bounds[3], wait, interpolate, draw=True)\n self.xy(bounds[2], bounds[1], wait, interpolate, draw=True)\n self.xy(bounds[0], bounds[1], wait, interpolate, 
draw=True)\n\n self.park()\n\n\n def test_arcs(self):\n self.park()\n elbow_angle = 120\n self.move_angles(angle_2=elbow_angle)\n\n for angle_1 in range(-135, 15, 15):\n self.move_angles(angle_1=angle_1, draw=True)\n\n for angle_2 in range(elbow_angle, elbow_angle+16):\n self.move_angles(angle_2=angle_2, draw=True)\n for angle_2 in range(elbow_angle+16, elbow_angle-16, -1):\n self.move_angles(angle_2=angle_2, draw=True)\n for angle_2 in range(elbow_angle-16, elbow_angle+1):\n self.move_angles(angle_2=angle_2, draw=True)\n\n\n # ----------------- pen-moving methods -----------------\n\n def xy(self, x=None, y=None, wait=0, interpolate=10, draw=False):\n \"\"\"Moves the pen to the xy position; optionally draws while doing it.\"\"\"\n\n wait = wait or self.wait\n\n if draw:\n self.pen.down()\n else:\n self.pen.up()\n\n x = x or self.x\n y = y or self.y\n\n (angle_1, angle_2) = self.xy_to_angles(x, y)\n\n # calculate how many steps we need for this move, and the x/y length of each\n (x_length, y_length) = (x - self.x, y - self.y)\n\n length = math.sqrt(x_length ** 2 + y_length **2)\n\n no_of_steps = int(length * interpolate) or 1\n\n if no_of_steps < 100:\n disable_tqdm = True\n else:\n disable_tqdm = False\n\n (length_of_step_x, length_of_step_y) = (x_length/no_of_steps, y_length/no_of_steps)\n\n for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):\n\n self.x = self.x + length_of_step_x\n self.y = self.y + length_of_step_y\n\n angle_1, angle_2 = self.xy_to_angles(self.x, self.y)\n\n self.set_angles(angle_1, angle_2)\n\n if step + 1 < no_of_steps:\n sleep(length * wait/no_of_steps)\n\n sleep(length * wait/10)\n\n\n def move_angles(self, angle_1=None, angle_2=None, wait=0, interpolate=10, draw=False):\n \"\"\"Moves the servo motors to the specified angles step-by-step, calling set_angles() for each step.\"\"\"\n\n wait = wait or self.wait\n\n if draw:\n self.pen.down()\n else:\n self.pen.up()\n\n diff_1 = diff_2 = 0\n\n if angle_1 is not None:\n diff_1 = angle_1 - self.angle_1\n if angle_2 is not None:\n diff_2 = angle_2 - self.angle_2\n\n length = math.sqrt(diff_1 ** 2 + diff_2 **2)\n\n no_of_steps = int(length * interpolate) or 1\n\n if no_of_steps < 100:\n disable_tqdm = True\n else:\n disable_tqdm = False\n\n (length_of_step_1, length_of_step_2) = (diff_1/no_of_steps, diff_2/no_of_steps)\n\n for step in tqdm.tqdm(range(no_of_steps), desc='Interpolation', leave=False, disable=disable_tqdm):\n\n self.angle_1 = self.angle_1 + length_of_step_1\n self.angle_2 = self.angle_2 + length_of_step_2\n\n self.set_angles(self.angle_1, self.angle_2)\n\n if step + 1 < no_of_steps:\n sleep(length * wait/no_of_steps)\n\n sleep(length * wait/10)\n\n\n def set_angles(self, angle_1=None, angle_2=None):\n \"\"\"Moves the servo motors to the specified angles immediately. 
Relies upon getting accurate pulse-width\n values.\n\n Calls set_pulse_widths().\n\n Sets current_x, current_y.\n \"\"\"\n\n pw_1 = pw_2 = None\n\n if angle_1 is not None:\n pw_1 = self.angles_to_pw_1(angle_1)\n\n if pw_1 > self.previous_pw_1:\n self.active_hysteresis_correction_1 = self.hysteresis_correction_1\n elif pw_1 < self.previous_pw_1:\n self.active_hysteresis_correction_1 = - self.hysteresis_correction_1\n\n self.previous_pw_1 = pw_1\n\n pw_1 = pw_1 + self.active_hysteresis_correction_1\n\n self.angle_1 = angle_1\n self.angles_used_1.add(int(angle_1))\n self.pulse_widths_used_1.add(int(pw_1))\n\n if angle_2 is not None:\n pw_2 = self.angles_to_pw_2(angle_2)\n\n if pw_2 > self.previous_pw_2:\n self.active_hysteresis_correction_2 = self.hysteresis_correction_2\n elif pw_2 < self.previous_pw_2:\n self.active_hysteresis_correction_2 = - self.hysteresis_correction_2\n\n self.previous_pw_2 = pw_2\n\n pw_2 = pw_2 + self.active_hysteresis_correction_2\n\n self.angle_2 = angle_2\n self.angles_used_2.add(int(angle_2))\n self.pulse_widths_used_2.add(int(pw_2))\n\n if self.turtle:\n\n x, y = self.angles_to_xy(self.angle_1, self.angle_2)\n\n self.turtle.setx(x * self.turtle.multiplier)\n self.turtle.sety(y * self.turtle.multiplier)\n\n self.set_pulse_widths(pw_1, pw_2)\n self.x, self.y = self.angles_to_xy(self.angle_1, self.angle_2)\n\n\n # ----------------- angles-to-pulse-widths methods -----------------\n\n def naive_angles_to_pulse_widths_1(self, angle):\n return (angle - self.servo_1_parked_angle) * self.servo_1_degree_ms + self.servo_1_parked_pw\n\n def naive_angles_to_pulse_widths_2(self, angle):\n return (angle - self.servo_2_parked_angle) * self.servo_2_degree_ms + self.servo_2_parked_pw\n\n\n # ----------------- hardware-related methods -----------------\n\n def set_pulse_widths(self, pw_1=None, pw_2=None):\n \"\"\"Applies the supplied pulse-width values to the servos, or pretends to, if we're in virtual\n mode.\"\"\"\n\n if self.virtual:\n\n if pw_1:\n if 500 < pw_1 < 2500:\n self.virtual_pw_1 = pw_1\n else:\n raise ValueError\n\n if pw_2:\n if 500 < pw_2 < 2500:\n self.virtual_pw_2 = pw_2\n else:\n raise ValueError\n\n else:\n\n if pw_1:\n self.rpi.set_servo_pulsewidth(14, pw_1)\n if pw_2:\n self.rpi.set_servo_pulsewidth(15, pw_2)\n\n\n def get_pulse_widths(self):\n \"\"\"Returns the actual pulse-widths values; if in virtual mode, returns the nominal values - i.e. the\n values that they might be.\n \"\"\"\n\n if self.virtual:\n\n actual_pulse_width_1 = self.virtual_pw_1\n actual_pulse_width_2 = self.virtual_pw_2\n\n else:\n\n actual_pulse_width_1 = self.rpi.get_servo_pulsewidth(14)\n actual_pulse_width_2 = self.rpi.get_servo_pulsewidth(15)\n\n return (actual_pulse_width_1, actual_pulse_width_2)\n\n\n def park(self):\n \"\"\"Park the plotter with the inner arm at -90˚ and the outer arm at 90˚ to it.\n\n This corresponds to an x/y position:\n\n * x: ``-inner_arm``\n * y: ``outer_arm``\n \"\"\"\n\n if self.virtual:\n print(\"Parking\")\n\n self.pen.up()\n\n\n self.xy(-self.inner_arm, self.outer_arm)\n sleep(1)\n\n\n def quiet(self, servos=[14, 15, 18]):\n \"\"\"Stop sending pulses to the servos, so that they are no longer energised (and so that they\n stop buzzing).\n \"\"\"\n\n if self.virtual:\n print(\"Going quiet\")\n\n else:\n for servo in servos:\n self.rpi.set_servo_pulsewidth(servo, 0)\n\n\n # ----------------- trigonometric methods -----------------\n\n # Every x/y position of the plotter corresponds to a pair of angles of the arms. 
These methods\n # calculate:\n #\n # the angles required to reach any x/y position\n # the x/y position represented by any pair of angles\n\n def xy_to_angles(self, x=0, y=0):\n \"\"\"Return the servo angles required to reach any x/y position.\"\"\"\n\n hypotenuse = math.sqrt(x**2+y**2)\n\n if hypotenuse > self.inner_arm + self.outer_arm:\n raise Exception(f\"Cannot reach {hypotenuse}; total arm length is {self.inner_arm + self.outer_arm}\")\n\n hypotenuse_angle = math.asin(x/hypotenuse)\n\n inner_angle = math.acos(\n (hypotenuse**2+self.inner_arm**2-self.outer_arm**2)/(2*hypotenuse*self.inner_arm)\n )\n outer_angle = math.acos(\n (self.inner_arm**2+self.outer_arm**2-hypotenuse**2)/(2*self.inner_arm*self.outer_arm)\n )\n\n shoulder_motor_angle = hypotenuse_angle - inner_angle\n elbow_motor_angle = math.pi - outer_angle\n\n return (math.degrees(shoulder_motor_angle), math.degrees(elbow_motor_angle))\n\n\n def angles_to_xy(self, shoulder_motor_angle, elbow_motor_angle):\n \"\"\"Return the x/y co-ordinates represented by a pair of servo angles.\"\"\"\n\n elbow_motor_angle = math.radians(elbow_motor_angle)\n shoulder_motor_angle = math.radians(shoulder_motor_angle)\n\n hypotenuse = math.sqrt(\n (self.inner_arm ** 2 + self.outer_arm ** 2 - 2 * self.inner_arm * self.outer_arm * math.cos(\n math.pi - elbow_motor_angle)\n )\n )\n base_angle = math.acos(\n (hypotenuse ** 2 + self.inner_arm ** 2 - self.outer_arm ** 2) / (2 * hypotenuse * self.inner_arm)\n )\n inner_angle = base_angle + shoulder_motor_angle\n\n x = math.sin(inner_angle) * hypotenuse\n y = math.cos(inner_angle) * hypotenuse\n\n return(x, y)\n\n\n # ----------------- calibration -----------------\n\n def auto_calibrate(self):\n self.park()\n\n for elbow in range(90, 136):\n self.set_angles(None, elbow)\n sleep(.01)\n\n for shoulder in range(-90, -140, -1):\n self.set_angles(shoulder, None)\n sleep(.01)\n\n\n def calibrate(self, servo=1):\n\n pin = {1: 14, 2: 15}[servo]\n\n servo_centre = {1: self.servo_1_parked_pw, 2: self.servo_2_parked_pw}.get(servo)\n servo_angle_pws = []\n texts = {\n \"arm-name\": {1: \"inner\", 2: \"outer\"},\n \"nominal-centre\": {1: 0, 2: 90},\n \"mount-arm\": {\n 1: \"(straight ahead)\",\n 2: \"(i.e. to the right) to the inner arm)\"\n },\n \"safe-guess\": {1: -60, 2: 90}\n }\n\n pw = servo_centre\n\n print(f\"Calibrating servo {servo}, for the {texts['arm-name'][servo]} arm.\")\n print(f\"See https://brachiograph.art/how-to/calibrate.html\")\n print()\n self.rpi.set_servo_pulsewidth(pin, pw)\n print(f\"The servo is now at {pw}µS, in the centre of its range of movement.\")\n print(\"Attach the protractor to the base, with its centre at the axis of the servo.\")\n\n print(f\"Mount the arm at a position as close as possible to {texts['nominal-centre'][servo]}˚ {texts['mount-arm'][servo]}.\")\n\n print(\"Now drive the arm to a known angle, as marked on the protractor.\")\n print(\"When the arm reaches the angle, press 1 and record the angle. 
Do this for as many angles as possible.\")\n print()\n print(\"When you have done all the angles, press 2.\")\n print(\"Press 0 to exit at any time.\")\n\n while True:\n key = readchar.readchar()\n\n if key == \"0\":\n return\n elif key == \"1\":\n angle = float(input(\"Enter the angle: \"))\n servo_angle_pws.append([angle, pw])\n elif key == \"2\":\n break\n elif key==\"a\":\n pw = pw - 10\n elif key==\"s\":\n pw = pw + 10\n elif key==\"A\":\n pw = pw - 1\n elif key==\"S\":\n pw = pw + 1\n else:\n continue\n\n print(pw)\n\n self.rpi.set_servo_pulsewidth(pin, pw)\n\n print(f\"------------------------\")\n print(f\"Recorded angles servo {servo}\")\n print(f\"------------------------\")\n print(f\" angle | pulse-width \")\n print(f\"---------+--------------\")\n\n servo_angle_pws.sort()\n for [angle, pw] in servo_angle_pws:\n print(f\" {angle:>6.1f} | {pw:>4.0f}\")\n\n servo_array = numpy.array(servo_angle_pws)\n\n pw = int(numpy.poly1d(\n numpy.polyfit(\n servo_array[:,0],\n servo_array[:,1],\n 3\n )\n )(0))\n\n self.rpi.set_servo_pulsewidth(pin, pw)\n print()\n print(f\"The servo is now at {int(pw)}µS, which should correspond to {texts['nominal-centre'][servo]}˚.\")\n print(\"If necessary, remount the arm at the centre of its optimal sweep for your drawing area.\")\n print()\n print(f\"Alternatively as a rule of thumb, if the arms are of equal length, use the position closest to {texts['safe-guess'][servo]}˚.\")\n\n print(\"Carefully count how many spline positions you had to move the arm by to get it there.\")\n print(\"Multiply that by the number of degrees for each spline to get the angle by which you moved it.\")\n offset = float(input(\"Enter the angle by which you moved the arm (anti-clockwise is negative): \"))\n\n print(f\"---------------------------\")\n print(f\"Calculated angles {texts['arm-name'][servo]} arm\")\n print(f\"---------------------------\")\n print(f\" angle | pulse-width \")\n print(f\"----------+----------------\")\n\n servo_angle_including_offset_pws = []\n\n for [angle, pw] in servo_angle_pws:\n angle_including_offset = round(angle + offset, 1)\n servo_angle_including_offset_pws.append([angle_including_offset, pw])\n print(f\" {angle:>6.1f} | {pw:>4.0f}\")\n\n print()\n print(\"Use this list of angles and pulse-widths in your BrachioGraph definition:\")\n print()\n print(f\"servo_{servo}_angle_pws={servo_angle_including_offset_pws}\")\n\n\n # ----------------- manual driving methods -----------------\n\n def drive(self):\n\n # adjust the pulse-widths using the keyboard\n\n pw_1, pw_2 = self.get_pulse_widths()\n\n self.set_pulse_widths(pw_1, pw_2)\n\n while True:\n key = readchar.readchar()\n\n if key == \"0\":\n return\n elif key==\"a\":\n pw_1 = pw_1 - 10\n elif key==\"s\":\n pw_1 = pw_1 + 10\n elif key==\"A\":\n pw_1 = pw_1 - 2\n elif key==\"S\":\n pw_1 = pw_1 + 2\n elif key==\"k\":\n pw_2 = pw_2 - 10\n elif key==\"l\":\n pw_2 = pw_2 + 10\n elif key==\"K\":\n pw_2 = pw_2 - 2\n elif key==\"L\":\n pw_2 = pw_2 + 2\n\n print(pw_1, pw_2)\n\n self.set_pulse_widths(pw_1, pw_2)\n\n\n def drive_xy(self):\n\n # move the pen up/down and left/right using the keyboard\n\n while True:\n key = readchar.readchar()\n\n if key == \"0\":\n return\n elif key==\"a\":\n self.x = self.x - 1\n elif key==\"s\":\n self.x = self.x + 1\n elif key==\"A\":\n self.x = self.x - .1\n elif key==\"S\":\n self.x = self.x + .1\n elif key==\"k\":\n self.y = self.y - 1\n elif key==\"l\":\n self.y = self.y + 1\n elif key==\"K\":\n self.y = self.y - .1\n elif key==\"L\":\n self.y = self.y + .1\n\n 
print(self.x, self.y)\n\n self.xy(self.x, self.y)\n\n\n # ----------------- reporting methods -----------------\n\n def status(self):\n print(\"------------------------------------------\")\n print(\" | Servo 1 | Servo 2 \")\n print(\" | Shoulder| Elbow \")\n print(\"----------------------|---------|---------\")\n\n pw_1, pw_2 = self.get_pulse_widths()\n print(f\"{'pulse-width |':>23}\", f\"{pw_1:>7.0f}\", \"|\", f\"{pw_2:>7.0f}\")\n\n angle_1, angle_2 = self.angle_1, self.angle_2\n print(f\"{'angle |':>23}\", f\"{angle_1:>7.0f}\", \"|\", f\"{angle_2:>7.0f}\")\n\n h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2\n print(f\"{'hysteresis correction |':>23}\", f\"{h1:>7.1f}\", \"|\", f\"{h2:>7.1f}\")\n print(\"------------------------------------------\")\n print(f\"{'x/y location |':>23}\", f\"{self.x:>7.1f}\", \"|\", f\"{self.y:>7.1f}\")\n print()\n print(\"------------------------------------------\")\n print(\"pen:\", self.pen.position)\n\n bl = self.bounds[0], self.bounds[1]\n tr = self.bounds[2], self.bounds[3]\n print(\"------------------------------------------\")\n print(\"bottom left:\", bl, \"top right:\", tr)\n print(\"------------------------------------------\")\n\n\n def report(self):\n\n print(f\" -----------------|-----------------\")\n print(f\" Servo 1 | Servo 2 \")\n print(f\" -----------------|-----------------\")\n\n h1, h2 = self.hysteresis_correction_1, self.hysteresis_correction_2\n print(f\"hysteresis {h1:>2.1f} | {h2:>2.1f}\")\n\n pw_1, pw_2 = self.get_pulse_widths()\n print(f\"pulse-width {pw_1:<4.0f} | {pw_2:<4.0f}\")\n\n angle_1, angle_2 = self.angle_1, self.angle_2\n\n if angle_1 and angle_2:\n\n print(f\" angle {angle_1:>4.0f} | {angle_2:>4.0f}\")\n\n print(f\" -----------------|-----------------\")\n print(f\" min max mid | min max mid\")\n print(f\" -----------------|-----------------\")\n\n if self.angles_used_1 and self.angles_used_2 and self.pulse_widths_used_1 and self.pulse_widths_used_2:\n\n min1 = min(self.pulse_widths_used_1)\n max1 = max(self.pulse_widths_used_1)\n mid1 = (min1 + max1) / 2\n min2 = min(self.pulse_widths_used_2)\n max2 = max(self.pulse_widths_used_2)\n mid2 = (min2 + max2) / 2\n\n print(f\"pulse-widths {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}\")\n\n min1 = min(self.angles_used_1)\n max1 = max(self.angles_used_1)\n mid1 = (min1 + max1) / 2\n min2 = min(self.angles_used_2)\n max2 = max(self.angles_used_2)\n mid2 = (min2 + max2) / 2\n\n print(f\" angles {min1:>4.0f} {max1:>4.0f} {mid1:>4.0f} | {min2:>4.0f} {max2:>4.0f} {mid2:>4.0f}\")\n\n else:\n\n print(\"No data recorded yet. 
Try calling the BrachioGraph.box() method first.\")\n\n\n def reset_report(self):\n\n self.angle_1 = self.angle_2 = None\n\n # Create sets for recording movement of the plotter.\n self.angles_used_1 = set()\n self.angles_used_2 = set()\n self.pulse_widths_used_1 = set()\n self.pulse_widths_used_2 = set()\n\n\n @property\n def bl(self):\n return (self.bounds[0], self.bounds[1])\n\n @property\n def tl(self):\n return (self.bounds[0], self.bounds[3])\n\n @property\n def tr(self):\n return (self.bounds[2], self.bounds[3])\n\n @property\n def br(self):\n return (self.bounds[2], self.bounds[1])\n\n\n def reset_turtle(self):\n self.turtle = BrachioGraphTurtle(\n inner_arm=self.inner_arm, # the length of the inner arm (blue)\n shoulder_centre_angle=-90, # the starting angle of the inner arm, relative to straight ahead\n shoulder_sweep=180, # the arc covered by the shoulder motor\n\n outer_arm=self.outer_arm, # the length of the outer arm (red)\n elbow_centre_angle=90, # the centre of the outer arm relative to the inner arm\n elbow_sweep=180, # the arc covered by the elbow motor\n\n window_size=800, # width and height of the turtle canvas\n speed=0, # how fast to draw\n )\n\n self.turtle.draw_grid()\n\n\nclass Pen:\n\n def __init__(self, bg, pw_up=1700, pw_down=1300, pin=18, transition_time=0.25, virtual=False):\n\n self.bg = bg\n self.pin = pin\n self.pw_up = pw_up\n self.pw_down = pw_down\n self.transition_time = transition_time\n self.virtual = virtual\n if self.virtual:\n\n print(\"Initialising virtual Pen\")\n\n else:\n\n self.rpi = pigpio.pi()\n self.rpi.set_PWM_frequency(self.pin, 50)\n\n self.up()\n sleep(0.3)\n self.down()\n sleep(0.3)\n self.up()\n sleep(0.3)\n\n\n def down(self):\n\n if self.virtual:\n self.virtual_pw = self.pw_down\n\n else:\n self.rpi.set_servo_pulsewidth(self.pin, self.pw_down)\n sleep(self.transition_time)\n\n if self.bg.turtle:\n self.bg.turtle.down()\n self.bg.turtle.color('blue')\n self.bg.turtle.width(1)\n\n self.position = \"down\"\n\n\n def up(self):\n\n if self.virtual:\n self.virtual_pw = self.pw_up\n\n else:\n self.rpi.set_servo_pulsewidth(self.pin, self.pw_up)\n sleep(self.transition_time)\n\n if self.bg.turtle:\n self.bg.turtle.up()\n\n self.position = \"up\"\n\n\n # for convenience, a quick way to set pen motor pulse-widths\n def pw(self, pulse_width):\n\n if self.virtual:\n self.virtual_pw = pulse_width\n\n else:\n self.rpi.set_servo_pulsewidth(self.pin, pulse_width)\n\n\n def calibrate(self):\n\n print(f\"Calibrating the pen-lifting servo.\")\n print(f\"See https://brachiograph.art/how-to/calibrate.html\")\n\n pw_1, pw_2 = self.bg.get_pulse_widths()\n pw_3 = self.pw_up\n\n while True:\n self.bg.set_pulse_widths(pw_1, pw_2)\n self.pw(pw_3)\n\n key = readchar.readchar()\n\n if key == \"0\":\n break\n elif key==\"a\":\n pw_1 = pw_1 - 10\n continue\n elif key==\"s\":\n pw_1 = pw_1 + 10\n continue\n elif key==\"k\":\n pw_2 = pw_2 - 10\n continue\n elif key==\"l\":\n pw_2 = pw_2 + 10\n continue\n\n elif key==\"t\":\n if pw_3 == self.pw_up:\n pw_3 = self.pw_down\n else:\n pw_3 = self.pw_up\n continue\n\n elif key==\"z\":\n pw_3 = pw_3 - 10\n print(pw_3)\n continue\n elif key==\"x\":\n pw_3 = pw_3 + 10\n print(pw_3)\n continue\n\n elif key==\"u\":\n self.pw_up = pw_3\n elif key==\"d\":\n self.pw_down = pw_3\n else:\n continue\n\n mid = (self.pw_up + self.pw_down) / 2\n print(f\"Pen-up pulse-width: {self.pw_up}µS, pen-down pulse-width: {self.pw_down}µS, mid-point: {mid}\")\n\n print()\n print(\"Use these values in your BrachioGraph definition:\")\n print()\n 
print(f\"pen_up={self.pw_up}, pen_down={self.pw_down}\")\n" ]
[ [ "numpy.array", "numpy.polyfit", "numpy.mean" ] ]
jackyhuynh/graph_traverse_using_networkx
[ "11988af1324cbd1520102c66776476d91c2d087e" ]
[ "truc_graph_traverse.py" ]
[ "# Required library\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\"\"\"\n# openfile read input from textfile\n# In fact, I test the data on different jupiter notebook and come up with the shortest version\n\"\"\"\n\n\ndef openfile(filename):\n with open(filename, \"r\") as file_reader:\n all_lines = file_reader.readlines()\n return all_lines\n\n\n\"\"\"\ncreate_data(data): take parameter data, remove all the newline, space and then convert each line to a tuple\ncreate RoadDict dictionary and list of tuple clean_data\n\"\"\"\n\n\ndef create_data(data):\n clean_data = []\n road_dict = {}\n count = 0\n # Clean the data by using for loop\n for line in data:\n line = line.replace('\\n', '').replace(' ', '').split(',')\n road_dict[count] = {'citi1': line[0], 'citi2': line[1], 'distance': line[2]}\n clean_data.append((line[0], line[1], float(line[2])))\n return clean_data, road_dict\n\n\n\"\"\"\nSimple get the input from user, and validation, to make sure it not crash the application\n\"\"\"\n\n\ndef get_user_input(cities, purpose):\n user_input = input(f\"Please enter the {purpose} city: \").capitalize()\n while user_input not in cities:\n cities_display(cities)\n user_input = input(f\"Please enter the {purpose} city again: \").capitalize()\n\n return user_input\n\n\n\"\"\"\nPrint out the cities in the list\n\"\"\"\n\n\ndef cities_display(cities):\n print(\"Target city and Destination city must be:\")\n for citi in cities:\n print(citi, end=', ')\n\n print('')\n\n\nif __name__ == '__main__':\n # Preparation:\n # create RoadDict as requirement and graph_data to feed in networkx to create graphs\n graph_data, road_dict = create_data(openfile(\"frenchcities.txt\"))\n # create multi graph using networkx\n multi_graph = nx.MultiGraph()\n multi_graph.add_weighted_edges_from(graph_data)\n\n # Convert the graph to dictionary with weight\n multi_graph_dict = dict(multi_graph.degree(weight='weight'))\n # create the city list for validation only\n cities_list = list(multi_graph_dict)\n\n # Task 1: print out the data\n nx.draw(multi_graph, with_labels=True, font_weight='bold')\n plt.show()\n\n # Task 2:\n cities_display(cities_list)\n target = get_user_input(cities_list, \"target\")\n destination = get_user_input(cities_list, \"destination\")\n\n # Using Kilometer because it is the standard measurement in France:\n # Searching using Dijkstra Algorithm (Bread First Search)\n print(f\"BFS: Cities need to travel: {nx.dijkstra_path(multi_graph, target, destination)}, \"\n f\"total distance: {nx.dijkstra_path_length(multi_graph, target, destination)} Km\")\n # Searching using Bellman Forf Algorithm (Depth First Search)\n print(f\"DFS: Cities need to travel: {nx.bellman_ford_path(multi_graph, target, destination)}, \"\n f\"total distance: {nx.bellman_ford_path_length(multi_graph, target, destination)} Km\")\n" ]
[ [ "matplotlib.pyplot.show" ] ]
Oewyn/onnxmltools
[ "8dbd844dab77754971f59d4d533e6763ce0b03c2" ]
[ "onnxmltools/convert/common/shape_calculator.py" ]
[ "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"\nCommon functions to convert any learner based on trees.\n\"\"\"\nimport numpy as np\nimport numbers\nimport six\nfrom ._registration import register_shape_calculator\nfrom .data_types import Int64TensorType, FloatTensorType, StringTensorType, DictionaryType, SequenceType\nfrom .utils import check_input_and_output_numbers, check_input_and_output_types, compare_strict_version\n\n\ndef calculate_linear_classifier_output_shapes(operator):\n '''\n This operator maps an input feature vector into a scalar label if the number of outputs is one. If two outputs\n appear in this operator's output list, we should further generate a map storing all classes' probabilities.\n\n Allowed input/output patterns are\n 1. [N, C] ---> [N, 1], A sequence of map\n\n Note that the second case is not allowed as long as ZipMap only produces dictionary.\n '''\n check_input_and_output_numbers(operator, input_count_range=1, output_count_range=[1, 2])\n check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType])\n\n if len(operator.inputs[0].type.shape) != 2:\n raise RuntimeError('Input must be a [N, C]-tensor')\n\n N = operator.inputs[0].type.shape[0]\n\n class_labels = operator.raw_operator.classes_\n if all(isinstance(i, np.ndarray) for i in class_labels):\n class_labels = np.concatenate(class_labels)\n if all(isinstance(i, (six.string_types, six.text_type)) for i in class_labels):\n operator.outputs[0].type = StringTensorType(shape=[N])\n if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':\n # For multi-class classifier, we produce a map for encoding the probabilities of all classes\n if compare_strict_version(operator.targeted_onnx_version, '1.2') < 0:\n operator.outputs[1].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]))\n else:\n operator.outputs[1].type = SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N)\n else:\n # For binary classifier, we produce the probability of the positive class\n operator.outputs[1].type = FloatTensorType(shape=[N, 1])\n elif all(isinstance(i, (numbers.Real, bool, np.bool_)) for i in class_labels):\n operator.outputs[0].type = Int64TensorType(shape=[N])\n if len(class_labels) > 2 or operator.type != 'SklearnLinearSVC':\n # For multi-class classifier, we produce a map for encoding the probabilities of all classes\n if compare_strict_version(operator.targeted_onnx_version, '1.2') < 0:\n operator.outputs[1].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]))\n else:\n operator.outputs[1].type = SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N)\n else:\n # For binary classifier, we produce the probability of the positive class\n operator.outputs[1].type = FloatTensorType(shape=[N, 1])\n else:\n raise ValueError('Unsupported or mixed label types')\n\n\ndef calculate_linear_regressor_output_shapes(operator):\n '''\n Allowed input/output patterns are\n 1. [N, C] ---> [N, 1]\n\n This operator produces a scalar prediction for every example in a batch. 
If the input batch size is N, the output\n shape may be [N, 1].\n '''\n check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)\n\n N = operator.inputs[0].type.shape[0]\n operator.outputs[0].type = FloatTensorType([N, 1])\n\n" ]
[ [ "numpy.concatenate" ] ]
omikabir/omEngin
[ "b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195" ]
[ "Z_ALL_FILE/Jy1/1092020-80-XAQ-Untitled.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport sqlite3\n\npt = os.getcwd()\nalarm = pt + \"\\\\C.csv\"\n\n\ndef conv_2_list(ls1, ls2, ls3):\n ToDf = pd.DataFrame(zip(ls1, ls2, l3))\n print(Todf)\n \n\nl0 = [\"0\", \"1\", \"2\", \"3\", \"4\"]\nl1 = [\"Amar\", \"Barsha\", \"Carlos\", \"Tanmay\", \"Misbah\"] \nl2 = [\"Alpha\", \"Bravo\", \"Charlie\", \"Tango\", \"Mike\"] \n#conv_2_list(l0,l1,l2)\n\n\ndef concat(v1,v2):\n z = str(v1) + '-' + str(v2)\n return z\n\nCDCT = lambda x : x[:4] if (len(x) >= 6) else \"NF\"\n\ndef df_add_col(dff,nwcol):\n df = dff.replace(r'^\\s*$', np.NaN, regex=True)\n for i in range(len(df)):\n df.loc[i,nwcol] = concat(df.loc[i,\"CUSTOMATTR15\"],df.loc[i,\"SUMMARY\"])\n return df\n\n\n\n\ndf0 = pd.read_csv(alarm)\ndf1 = df0[['SERIAL','CUSTOMATTR15','SUMMARY','LASTOCCURRENCE','CLEARTIMESTAMP','CUSTOMATTR3']]\nx = df_add_col(df1,'scode')\nls = x.columns.to_list()\nprint(ls)\n#print(x)\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
Chenzhoujia/Mobile_hand_tf-gnn-samples-master
[ "56236752bb06bcc60b4befb024cdd100aa9ce9ac" ]
[ "tasks/varmisuse_task.py" ]
[ "import re\nfrom collections import defaultdict\nfrom multiprocessing import Process, Queue, cpu_count\nfrom typing import Any, Dict, Iterable, List, NamedTuple, Set, Iterator\n\nimport tensorflow as tf\nimport numpy as np\nfrom dpu_utils.utils import RichPath\nfrom dpu_utils.codeutils import split_identifier_into_parts, get_language_keywords\n\nfrom .sparse_graph_task import Sparse_Graph_Task, DataFold, MinibatchData\nfrom utils import BIG_NUMBER\n\n\nALPHABET = \"abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\"\nALPHABET_DICT = {char: idx + 2 for (idx, char) in enumerate(ALPHABET)} # \"0\" is PAD, \"1\" is UNK\nALPHABET_DICT[\"PAD\"] = 0\nALPHABET_DICT[\"UNK\"] = 1\nUSES_SUBTOKEN_EDGE_NAME = \"UsesSubtoken\"\nSELF_LOOP_EDGE_NAME = \"SelfLoop\"\nBACKWARD_EDGE_TYPE_NAME_SUFFIX = \"_Bkwd\"\n__PROGRAM_GRAPH_EDGES_TYPES = [\"Child\", \"NextToken\", \"LastUse\", \"LastWrite\", \"LastLexicalUse\", \"ComputedFrom\",\n \"GuardedByNegation\", \"GuardedBy\", \"FormalArgName\", \"ReturnsTo\", USES_SUBTOKEN_EDGE_NAME]\n__PROGRAM_GRAPH_EDGES_TYPES_WITH_BKWD = \\\n __PROGRAM_GRAPH_EDGES_TYPES + [edge_type_name + BACKWARD_EDGE_TYPE_NAME_SUFFIX\n for edge_type_name in __PROGRAM_GRAPH_EDGES_TYPES]\nPROGRAM_GRAPH_EDGES_TYPES_VOCAB = {edge_type_name: idx\n for idx, edge_type_name in enumerate(__PROGRAM_GRAPH_EDGES_TYPES_WITH_BKWD)}\n\n\nclass GraphSample(NamedTuple):\n adjacency_lists: List[np.ndarray]\n type_to_node_to_num_incoming_edges: np.ndarray\n unique_labels_as_characters: np.ndarray\n node_labels_to_unique_labels: np.ndarray\n slot_node_id: int\n variable_candidate_nodes: np.ndarray\n variable_candidate_nodes_mask: np.ndarray\n\n\ndef _add_per_subtoken_nodes(unsplittable_node_names: Set[str], graph_dict: Dict[str, Any]) -> None:\n graph_node_labels = graph_dict['NodeLabels']\n subtoken_to_using_nodes = defaultdict(set)\n\n max_used_node_id = 0\n for node_id, node_label in graph_node_labels.items():\n node_id = int(node_id)\n max_used_node_id = max(node_id, max_used_node_id)\n\n # Skip AST nodes and punctuation:\n if node_label in unsplittable_node_names:\n continue\n\n for subtoken in split_identifier_into_parts(node_label):\n if re.search('[a-zA-Z0-9]', subtoken):\n subtoken_to_using_nodes[subtoken].add(node_id)\n\n subtoken_node_id = max_used_node_id\n new_edges = []\n for subtoken, using_nodes in subtoken_to_using_nodes.items():\n subtoken_node_id += 1\n graph_node_labels[str(subtoken_node_id)] = subtoken\n new_edges.extend([(using_node_id, subtoken_node_id)\n for using_node_id in using_nodes])\n\n graph_dict['Edges'][USES_SUBTOKEN_EDGE_NAME] = new_edges\n\n\ndef _load_single_sample(raw_sample: Dict[str, Any],\n unsplittable_node_names: Set[str],\n graph_node_label_max_num_chars: int,\n max_variable_candidates: int = 5,\n add_self_loop_edges: bool = False):\n _add_per_subtoken_nodes(unsplittable_node_names, raw_sample['ContextGraph'])\n num_nodes = len(raw_sample['ContextGraph']['NodeLabels'])\n\n node_label_chars = np.zeros(shape=(num_nodes, graph_node_label_max_num_chars),\n dtype=np.uint8)\n for (node, label) in raw_sample['ContextGraph']['NodeLabels'].items():\n for (char_idx, label_char) in enumerate(label[:graph_node_label_max_num_chars].lower()):\n node_label_chars[int(node), char_idx] = ALPHABET_DICT.get(label_char, 1)\n node_label_chars_unique, node_label_chars_indices = np.unique(node_label_chars,\n axis=0,\n return_inverse=True)\n\n # Split edges according to edge_type and count their numbers:\n num_edge_types = len(PROGRAM_GRAPH_EDGES_TYPES_VOCAB)\n 
adjacency_lists = [np.zeros((0, 2), dtype=np.int32) for _ in range(num_edge_types)]\n num_incoming_edges_per_type = np.zeros((num_edge_types, num_nodes), dtype=np.uint16)\n raw_edges = raw_sample['ContextGraph']['Edges']\n for e_type, e_type_edges in raw_edges.items():\n if len(e_type_edges) > 0:\n e_type_bkwd = e_type + BACKWARD_EDGE_TYPE_NAME_SUFFIX\n e_type_idx = PROGRAM_GRAPH_EDGES_TYPES_VOCAB[e_type]\n e_type_bkwd_idx = PROGRAM_GRAPH_EDGES_TYPES_VOCAB[e_type_bkwd]\n\n fwd_edges = np.array(e_type_edges, dtype=np.int32)\n bkwd_edges = np.flip(fwd_edges, axis=1)\n\n adjacency_lists[e_type_idx] = fwd_edges\n adjacency_lists[e_type_bkwd_idx] = bkwd_edges\n num_incoming_edges_per_type[e_type_idx, :] = \\\n np.bincount(adjacency_lists[e_type_idx][:, 1], minlength=num_nodes)\n num_incoming_edges_per_type[e_type_bkwd_idx, :] = \\\n np.bincount(adjacency_lists[e_type_bkwd_idx][:, 1], minlength=num_nodes)\n\n if add_self_loop_edges:\n self_loop_edge_type_idx = PROGRAM_GRAPH_EDGES_TYPES_VOCAB[SELF_LOOP_EDGE_NAME]\n adjacency_lists[self_loop_edge_type_idx] = \\\n np.stack([np.arange(num_nodes), np.arange(num_nodes)], axis=1)\n num_incoming_edges_per_type[self_loop_edge_type_idx, :] = \\\n np.ones(shape=(num_nodes,))\n\n # VarMisuse-specific things: Reorder symbol candidates so that correct one is first.\n correct_candidate_id = None\n distractor_candidate_ids = [] # type: List[int]\n for candidate in raw_sample['SymbolCandidates']:\n if candidate['IsCorrect']:\n correct_candidate_id = candidate['SymbolDummyNode']\n else:\n distractor_candidate_ids.append(candidate['SymbolDummyNode'])\n assert correct_candidate_id is not None\n candidate_node_ids = [correct_candidate_id] + distractor_candidate_ids[:max_variable_candidates - 1]\n # Pad symbol candidates up to max_variable_candidates:\n num_scope_padding = max_variable_candidates - len(candidate_node_ids)\n candidate_node_ids_mask = [True] * len(candidate_node_ids) + [False] * num_scope_padding\n candidate_node_ids = candidate_node_ids + [0] * num_scope_padding\n\n return GraphSample(adjacency_lists=adjacency_lists,\n type_to_node_to_num_incoming_edges=num_incoming_edges_per_type,\n unique_labels_as_characters=node_label_chars_unique,\n node_labels_to_unique_labels=node_label_chars_indices,\n slot_node_id=raw_sample['SlotDummyNode'],\n variable_candidate_nodes=np.array(candidate_node_ids),\n variable_candidate_nodes_mask=np.array(candidate_node_ids_mask),\n )\n\n\ndef _data_loading_worker(path_queue: Queue,\n result_queue: Queue,\n unsplittable_node_names: Set[str],\n graph_node_label_max_num_chars: int,\n max_variable_candidates: int,\n add_self_loop_edges: bool,\n ) -> None:\n while True:\n next_path = path_queue.get()\n if next_path is None: # Our signal that all files have been processed\n path_queue.put(None) # Signal to the other workers\n result_queue.put(None) # Signal to the controller that we are done\n break\n\n # Read the file and push examples out as soon as we get them:\n for raw_sample in next_path.read_by_file_suffix():\n result_queue.put(_load_single_sample(raw_sample,\n unsplittable_node_names,\n graph_node_label_max_num_chars,\n max_variable_candidates,\n add_self_loop_edges,\n ))\n\n\ndef _load_data(paths: List[RichPath],\n unsplittable_node_names: Set[str],\n graph_node_label_max_num_chars: int,\n max_variable_candidates: int,\n add_self_loop_edges: bool,\n no_parallel: bool = False,\n ) -> Iterable[GraphSample]:\n if no_parallel:\n return [_load_single_sample(raw_sample, unsplittable_node_names, graph_node_label_max_num_chars,\n 
max_variable_candidates, add_self_loop_edges)\n for path in paths\n for raw_sample in path.read_by_file_suffix()]\n\n path_queue = Queue(maxsize=len(paths) + 1)\n result_queue = Queue()\n\n # Set up list of work to do:\n for path in paths:\n path_queue.put(path)\n path_queue.put(None) # Signal for the end of the queue\n\n # Set up workers:\n workers = []\n for _ in range(cpu_count()):\n workers.append(Process(target=_data_loading_worker,\n args=(path_queue,\n result_queue,\n unsplittable_node_names,\n graph_node_label_max_num_chars,\n max_variable_candidates,\n add_self_loop_edges,\n )))\n workers[-1].start()\n\n # Consume the data:\n num_workers_terminated = 0\n while num_workers_terminated < len(workers):\n parsed_sample = result_queue.get()\n if parsed_sample is None:\n num_workers_terminated += 1 # Worker signaled that it's done\n else:\n yield parsed_sample\n\n # Clean up the workers:\n for worker in workers:\n worker.join()\n\n\nclass VarMisuse_Task(Sparse_Graph_Task):\n @classmethod\n def default_params(cls):\n params = super().default_params()\n params.update({\n 'max_variable_candidates': 5,\n 'graph_node_label_max_num_chars': 19,\n 'graph_node_label_representation_size': 64,\n 'slot_score_via_linear_layer': True,\n 'loss_function': 'max-likelihood', # max-likelihood or max-margin\n 'max-margin_loss_margin': 0.2,\n 'out_layer_dropout_rate': 0.2,\n 'add_self_loop_edges': False,\n # 'max_num_data_files': 3,\n })\n return params\n\n @staticmethod\n def name() -> str:\n return \"VarMisuse\"\n\n @staticmethod\n def default_data_path() -> str:\n return \"data/varmisuse\"\n\n def __init__(self, params: Dict[str, Any]):\n super().__init__(params)\n\n # If required, add the self-loop edge type to the vocab:\n if params.get('add_self_loop_edges'):\n if SELF_LOOP_EDGE_NAME not in PROGRAM_GRAPH_EDGES_TYPES_VOCAB:\n PROGRAM_GRAPH_EDGES_TYPES_VOCAB[SELF_LOOP_EDGE_NAME] = \\\n len(PROGRAM_GRAPH_EDGES_TYPES_VOCAB)\n\n def get_metadata(self) -> Dict[str, Any]:\n metadata = super().get_metadata()\n return metadata\n\n def restore_from_metadata(self, metadata: Dict[str, Any]) -> None:\n super().restore_from_metadata(metadata)\n\n @property\n def num_edge_types(self) -> int:\n return len(PROGRAM_GRAPH_EDGES_TYPES_VOCAB)\n\n @property\n def initial_node_feature_size(self) -> int:\n return self.params['graph_node_label_representation_size']\n\n # -------------------- Data Loading --------------------\n def load_data(self, path: RichPath) -> None:\n # Note that as __load_data produces a generator, we explicitly force loading\n # (and caching) here:\n self._loaded_data[DataFold.TRAIN] = \\\n list(self.__load_data(path.join(\"graphs-train\"), DataFold.TRAIN))\n self._loaded_data[DataFold.VALIDATION] = \\\n list(self.__load_data(path.join(\"graphs-valid\"), DataFold.VALIDATION))\n\n def load_eval_data_from_path(self, path: RichPath) -> Iterable[Any]:\n if path.path == self.default_data_path():\n path = path.join(\"graphs-test\")\n return iter(self.__load_data(path, DataFold.TEST))\n\n def __load_data(self, data_dir: RichPath, data_fold: DataFold) -> Iterator[GraphSample]:\n all_data_files = data_dir.iterate_filtered_files_in_dir(\"*.gz\")\n\n max_num_files = self.params.get('max_num_data_files', None)\n if max_num_files is not None:\n all_data_files = sorted(all_data_files)[:max_num_files]\n else:\n all_data_files = list(all_data_files)\n print(\" Loading VarMisuse data from %s [%i data files].\" % (data_dir, len(all_data_files)))\n\n unsplittable_keywords = get_language_keywords('csharp')\n return 
_load_data(all_data_files,\n unsplittable_keywords,\n self.params['graph_node_label_max_num_chars'],\n self.params['max_variable_candidates'],\n self.params['add_self_loop_edges'])\n\n # -------------------- Model Construction --------------------\n def make_task_input_model(self,\n placeholders: Dict[str, tf.Tensor],\n model_ops: Dict[str, tf.Tensor],\n ) -> None:\n node_label_char_length = self.params['graph_node_label_max_num_chars']\n placeholders['unique_labels_as_characters'] = \\\n tf.placeholder(dtype=tf.int32, shape=[None, node_label_char_length], name='unique_labels_as_characters')\n placeholders['node_labels_to_unique_labels'] = \\\n tf.placeholder(dtype=tf.int32, shape=[None], name='node_labels_to_unique_labels')\n placeholders['adjacency_lists'] = \\\n [tf.placeholder(dtype=tf.int32, shape=[None, 2], name='adjacency_e%s' % e)\n for e in range(self.num_edge_types)]\n placeholders['type_to_num_incoming_edges'] = \\\n tf.placeholder(dtype=tf.float32, shape=[self.num_edge_types, None], name='type_to_num_incoming_edges')\n\n model_ops['initial_node_features'] = \\\n self.__get_node_label_charcnn_embeddings(placeholders['unique_labels_as_characters'],\n placeholders['node_labels_to_unique_labels'])\n model_ops['adjacency_lists'] = placeholders['adjacency_lists']\n model_ops['type_to_num_incoming_edges'] = placeholders['type_to_num_incoming_edges']\n\n def __get_node_label_charcnn_embeddings(self,\n unique_labels_as_characters: tf.Tensor,\n node_labels_to_unique_labels: tf.Tensor,\n ) -> tf.Tensor:\n \"\"\"\n Compute representation of node labels using a 2-layer character CNN.\n\n Args:\n unique_labels_as_characters: int32 tensor of shape [U, C]\n representing the unique (node) labels occurring in a\n batch, where U is the number of such labels and C the\n maximal number of characters.\n node_labels_to_unique_labels: int32 tensor of shape [V],\n mapping each node in the batch to one of the unique\n labels.\n\n Returns:\n float32 tensor of shape [V, D] representing embedded node\n label information about each node.\n \"\"\"\n label_embedding_size = self.params['graph_node_label_representation_size'] # D\n # U ~ num unique labels\n # C ~ num characters (self.params['graph_node_label_max_num_chars'])\n # A ~ num characters in alphabet\n unique_label_chars_one_hot = tf.one_hot(indices=unique_labels_as_characters,\n depth=len(ALPHABET),\n axis=-1) # Shape: [U, C, A]\n\n # Choose kernel sizes such that there is a single value at the end:\n char_conv_l1_kernel_size = 5\n char_conv_l2_kernel_size = \\\n self.params['graph_node_label_max_num_chars'] - 2 * (char_conv_l1_kernel_size - 1)\n\n char_conv_l1 = \\\n tf.keras.layers.Conv1D(filters=16,\n kernel_size=char_conv_l1_kernel_size,\n activation=tf.nn.leaky_relu,\n )(unique_label_chars_one_hot) # Shape: [U, C - (char_conv_l1_kernel_size - 1), 16]\n char_pool_l1 = \\\n tf.keras.layers.MaxPool1D(pool_size=char_conv_l1_kernel_size,\n strides=1,\n )(inputs=char_conv_l1) # Shape: [U, C - 2*(char_conv_l1_kernel_size - 1), 16]\n char_conv_l2 = \\\n tf.keras.layers.Conv1D(filters=label_embedding_size,\n kernel_size=char_conv_l2_kernel_size,\n activation=tf.nn.leaky_relu,\n )(char_pool_l1) # Shape: [U, 1, D]\n unique_label_representations = tf.squeeze(char_conv_l2, axis=1) # Shape: [U, D]\n node_label_representations = tf.gather(params=unique_label_representations,\n indices=node_labels_to_unique_labels)\n return node_label_representations\n\n def make_task_output_model(self,\n placeholders: Dict[str, tf.Tensor],\n model_ops: Dict[str, tf.Tensor],\n ) 
-> None:\n placeholders['slot_node_ids'] = \\\n tf.placeholder(dtype=tf.int32, shape=[None], name='slot_node_ids')\n placeholders['candidate_node_ids'] = \\\n tf.placeholder(dtype=tf.int32, shape=[None, None], name='candidate_node_ids')\n placeholders['candidate_node_ids_mask'] = \\\n tf.placeholder(dtype=tf.float32, shape=[None, None], name='candidate_node_ids_mask')\n placeholders['out_layer_dropout_rate'] = \\\n tf.placeholder_with_default(0.0, shape=[], name='out_layer_dropout_rate')\n\n final_node_repr_size = model_ops['final_node_representations'].shape.as_list()[-1]\n num_candidate_vars = self.params['max_variable_candidates']\n\n final_node_states = \\\n tf.nn.dropout(model_ops['final_node_representations'],\n rate=placeholders['out_layer_dropout_rate']) # Shape: [V, D]\n\n # --- (1) Collect representation of slots and candidates:\n slot_representations = \\\n tf.gather(params=final_node_states, indices=placeholders['slot_node_ids']) # Shape: [G, D]\n # Make things fit into 1D gather:\n candidate_node_ids = tf.reshape(placeholders['candidate_node_ids'], shape=[-1])\n candidate_representations = \\\n tf.gather(params=final_node_states, indices=candidate_node_ids) # Shape: [G * Cands, D]\n candidate_representations = \\\n tf.reshape(candidate_representations,\n shape=[-1, num_candidate_vars, final_node_repr_size]) # Shape: [G, Cands, D]\n\n # --- (2) Compute match between final candidate representations and slot representation:\n slot_candidate_inner_product = \\\n tf.einsum('sd,scd->sc', slot_representations, candidate_representations) # Shape: [G, Cands]\n\n if self.params['slot_score_via_linear_layer']:\n repeated_slots = tf.tile(tf.expand_dims(slot_representations, axis=1),\n multiples=[1, num_candidate_vars, 1]) # Shape: [G, Cands, D]\n slot_cand_comb = tf.concat([candidate_representations,\n repeated_slots,\n tf.expand_dims(slot_candidate_inner_product, -1)],\n axis=2) # Shape: [G, Cands, 2*D + 1]\n logits = tf.keras.layers.Dense(units=1,\n use_bias=False,\n activation=None,\n name='slot_score_linear_layer'\n )(slot_cand_comb) # Shape: [G, Cands, 1]\n logits = tf.squeeze(logits, axis=-1) # Shape: [G, Cands]\n else:\n logits = slot_candidate_inner_product\n\n logits += (1.0 - placeholders['candidate_node_ids_mask']) * -BIG_NUMBER\n\n # --- (3) Compute loss & metrics:\n loss_function = self.params['loss_function']\n # Note that by convention, the first candidate is always the correct one:\n correct_choices = tf.zeros([tf.shape(logits)[0]], dtype=tf.int32)\n if loss_function == 'max-likelihood':\n per_graph_loss = \\\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=correct_choices, logits=logits)\n elif loss_function == 'max-margin':\n log_probs = tf.nn.log_softmax(logits)\n correct_log_prob = log_probs[:, 0]\n max_wrong_log_prob = tf.reduce_max(log_probs[:, 1:], axis=1)\n per_graph_loss = \\\n tf.nn.relu(max_wrong_log_prob - correct_log_prob + self.params['loss_margin'])\n else:\n raise Exception('Invalid loss function option: \"%s\"' % loss_function)\n\n prediction_is_correct = tf.equal(tf.argmax(tf.nn.softmax(logits), 1, output_type=tf.int32),\n correct_choices)\n accuracy = tf.reduce_mean(tf.cast(prediction_is_correct, tf.float32))\n\n tf.summary.scalar('accuracy', accuracy)\n model_ops['task_metrics'] = {\n 'loss': tf.reduce_mean(per_graph_loss),\n 'total_loss': tf.reduce_sum(per_graph_loss),\n 'accuracy': accuracy,\n 'num_correct_predictions': tf.reduce_sum(tf.cast(prediction_is_correct, tf.int32)),\n }\n\n # -------------------- Minibatching and training 
loop --------------------\n def make_minibatch_iterator(self,\n data: Iterable[Any],\n data_fold: DataFold,\n model_placeholders: Dict[str, tf.Tensor],\n max_nodes_per_batch: int) \\\n -> Iterable[MinibatchData]:\n if data_fold == DataFold.TRAIN:\n np.random.shuffle(data)\n\n if isinstance(data, Iterator):\n data_iter = data\n else:\n data_iter = iter(data)\n\n def init_raw_batch_data_holder() -> Dict[str, Any]:\n return {\n 'adj_lists': [[] for _ in range(self.num_edge_types)],\n 'type_to_num_in_edges': [],\n 'uniq_labels_as_chars': [],\n 'node_labels_to_uniq_labels': [],\n 'slot_node_ids': [],\n 'candidate_node_ids': [],\n 'candidate_node_ids_mask': [],\n 'num_graphs': 0,\n 'node_offset': 0,\n 'unique_label_offset': 0,\n }\n\n def finalise_batch_data(raw_batch_data: Dict[str, Any]) -> MinibatchData:\n batch_feed_dict = {\n model_placeholders['unique_labels_as_characters']: np.concatenate(raw_batch_data['uniq_labels_as_chars'], axis=0),\n model_placeholders['node_labels_to_unique_labels']: np.concatenate(raw_batch_data['node_labels_to_uniq_labels'], axis=0),\n model_placeholders['type_to_num_incoming_edges']: np.concatenate(raw_batch_data['type_to_num_in_edges'], axis=1),\n model_placeholders['slot_node_ids']: raw_batch_data['slot_node_ids'],\n model_placeholders['candidate_node_ids']: raw_batch_data['candidate_node_ids'],\n model_placeholders['candidate_node_ids_mask']: raw_batch_data['candidate_node_ids_mask'],\n }\n\n if data_fold == DataFold.TRAIN:\n batch_feed_dict[model_placeholders['out_layer_dropout_rate']] = self.params['out_layer_dropout_rate']\n\n # Merge adjacency lists:\n num_edges = 0\n for i in range(self.num_edge_types):\n if len(raw_batch_data['adj_lists'][i]) > 0:\n adj_list = np.concatenate(raw_batch_data['adj_lists'][i])\n else:\n adj_list = np.zeros((0, 2), dtype=np.int32)\n num_edges += adj_list.shape[0]\n batch_feed_dict[model_placeholders['adjacency_lists'][i]] = adj_list\n\n return MinibatchData(feed_dict=batch_feed_dict,\n num_graphs=raw_batch_data['num_graphs'],\n num_nodes=raw_batch_data['node_offset'],\n num_edges=num_edges)\n\n try:\n cur_batch_data = init_raw_batch_data_holder()\n while True:\n cur_graph = next(data_iter)\n # We pack until we cannot fit more graphs in the batch, yield, and continue:\n if cur_batch_data['node_offset'] + len(cur_graph.node_labels_to_unique_labels) >= max_nodes_per_batch:\n yield finalise_batch_data(cur_batch_data)\n cur_batch_data = init_raw_batch_data_holder()\n\n # Graph structure:\n for i in range(self.num_edge_types):\n cur_batch_data['adj_lists'][i].append(cur_graph.adjacency_lists[i] + cur_batch_data['node_offset'])\n cur_batch_data['type_to_num_in_edges'].append(cur_graph.type_to_node_to_num_incoming_edges)\n\n # Node labels:\n cur_batch_data['uniq_labels_as_chars'].append(cur_graph.unique_labels_as_characters)\n cur_batch_data['node_labels_to_uniq_labels'].append(\n cur_graph.node_labels_to_unique_labels + cur_batch_data['unique_label_offset'])\n cur_batch_data['unique_label_offset'] += cur_graph.unique_labels_as_characters.shape[0]\n\n # VarMisuse task bits:\n cur_batch_data['slot_node_ids'].append(cur_graph.slot_node_id + cur_batch_data['node_offset'])\n cur_batch_data['candidate_node_ids'].append(cur_graph.variable_candidate_nodes + cur_batch_data['node_offset'])\n cur_batch_data['candidate_node_ids_mask'].append(cur_graph.variable_candidate_nodes_mask)\n\n # Finally, update the offset we use to shift things during batch construction:\n cur_batch_data['num_graphs'] += 1\n cur_batch_data['node_offset'] += 
len(cur_graph.node_labels_to_unique_labels)\n except StopIteration:\n # Final batch, yield only if non-empty:\n if cur_batch_data['num_graphs'] > 0:\n yield finalise_batch_data(cur_batch_data)\n\n def early_stopping_metric(self, task_metric_results: List[Dict[str, np.ndarray]], num_graphs: int) -> float:\n # Early stopping based on accuracy; as we are trying to minimize, negate it:\n acc = sum([m['num_correct_predictions'] for m in task_metric_results]) / float(num_graphs)\n return -acc\n\n def pretty_print_epoch_task_metrics(self, task_metric_results: List[Dict[str, np.ndarray]], num_graphs: int) -> str:\n acc = sum([m['num_correct_predictions'] for m in task_metric_results]) / float(num_graphs)\n return \"Accuracy: %.3f\" % (acc,)\n" ]
[ [ "numpy.ones", "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.einsum", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.nn.dropout", "tensorflow.keras.layers.Conv1D", "numpy.concatenate", "tensorflow.nn.log_softmax", "tensorflow.keras.layers.Dense", "numpy.unique", "tensorflow.nn.relu", "numpy.bincount", "tensorflow.shape", "numpy.zeros", "tensorflow.expand_dims", "numpy.arange", "tensorflow.cast", "tensorflow.placeholder_with_default", "tensorflow.placeholder", "numpy.random.shuffle", "tensorflow.reduce_mean", "numpy.flip", "numpy.array", "tensorflow.gather", "tensorflow.keras.layers.MaxPool1D" ] ]
Mehrad0711/oslo
[ "873d771a68bc380903947010da0b66f58f60e496" ]
[ "oslo/pytorch/kernel_fusion/cuda/fused_normalization.py" ]
[ "import numbers\nfrom typing import Optional, Sequence\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\n\nfrom oslo.pytorch.kernel_fusion.cuda import CUDA\n\n\ndef _get_autocast_dtypes() -> Sequence[torch.dtype]:\n if torch.cuda.is_bf16_supported():\n return [torch.half, torch.bfloat16]\n return [torch.half]\n\n\ndef _get_current_dtype(dtype: Optional[torch.dtype] = None) -> torch.dtype:\n if not torch.is_autocast_enabled():\n return torch.float or dtype\n else:\n return torch.get_autocast_gpu_dtype()\n\n\ndef _cast_if_autocast_enabled(*args):\n if not torch.is_autocast_enabled():\n return args\n else:\n return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())\n\n\nclass FusedLayerNormAffineFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias, normalized_shape, eps):\n ctx.normalized_shape = normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n weight_ = weight.contiguous()\n bias_ = bias.contiguous()\n output, mean, invvar = CUDA.layer_norm_forward_affine(\n input_, ctx.normalized_shape, weight_, bias_, ctx.eps\n )\n ctx.save_for_backward(input_, weight_, bias_, mean, invvar)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input_, weight_, bias_, mean, invvar = ctx.saved_tensors\n grad_input = grad_weight = grad_bias = None\n grad_input, grad_weight, grad_bias = CUDA.layer_norm_backward_affine(\n grad_output.contiguous(),\n mean,\n invvar,\n input_,\n ctx.normalized_shape,\n weight_,\n bias_,\n ctx.eps,\n )\n return grad_input, grad_weight, grad_bias, None, None\n\n\nclass FusedRMSNormAffineFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, normalized_shape, eps):\n ctx.normalized_shape = normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n weight_ = weight.contiguous()\n output, invvar = CUDA.rms_norm_forward_affine(\n input_, ctx.normalized_shape, weight_, ctx.eps\n )\n ctx.save_for_backward(input_, weight_, invvar)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input_, weight_, invvar = ctx.saved_tensors\n grad_input = grad_weight = None\n grad_input, grad_weight = CUDA.rms_norm_backward_affine(\n grad_output.contiguous(),\n invvar,\n input_,\n ctx.normalized_shape,\n weight_,\n ctx.eps,\n )\n return grad_input, grad_weight, None, None\n\n\nclass FusedLayerNormAffineMixedDtypesFunction(FusedLayerNormAffineFunction):\n @staticmethod\n def forward(ctx, input, weight, bias, normalized_shape, eps):\n ctx.normalized_shape = normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n weight_ = weight.contiguous()\n bias_ = bias.contiguous()\n output, mean, invvar = CUDA.layer_norm_forward_affine_mixed_dtypes(\n input_, ctx.normalized_shape, weight_, bias_, ctx.eps\n )\n ctx.save_for_backward(input_, weight_, bias_, mean, invvar)\n return output\n\n\nclass FusedRMSNormAffineMixedDtypesFunction(FusedRMSNormAffineFunction):\n @staticmethod\n def forward(ctx, input, weight, normalized_shape, eps):\n ctx.normalized_shape = normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n weight_ = weight.contiguous()\n output, invvar = CUDA.rms_norm_forward_affine_mixed_dtypes(\n input_, ctx.normalized_shape, weight_, ctx.eps\n )\n\n ctx.save_for_backward(input_, weight_, invvar)\n return output\n\n\nclass FusedLayerNormFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, normalized_shape, eps):\n ctx.normalized_shape = 
normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n output, mean, invvar = CUDA.layer_norm_forward(\n input_, ctx.normalized_shape, ctx.eps\n )\n ctx.save_for_backward(input_, mean, invvar)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input_, mean, invvar = ctx.saved_tensors\n grad_input = None\n grad_input = CUDA.layer_norm_backward(\n grad_output.contiguous(),\n mean,\n invvar,\n input_,\n ctx.normalized_shape,\n ctx.eps,\n )\n return grad_input, None, None\n\n\nclass FusedRMSNormFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, normalized_shape, eps):\n ctx.normalized_shape = normalized_shape\n ctx.eps = eps\n input_ = input.contiguous()\n output, invvar = CUDA.rms_norm_forward(input_, ctx.normalized_shape, ctx.eps)\n ctx.save_for_backward(input_, invvar)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n input_, invvar = ctx.saved_tensors\n grad_input = None\n grad_input = CUDA.rms_norm_backward(\n grad_output.contiguous(), invvar, input_, ctx.normalized_shape, ctx.eps\n )\n return grad_input, None, None\n\n\ndef fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):\n args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedLayerNormAffineFunction.apply(*args)\n\n\ndef fused_layer_norm(input, normalized_shape, eps=1e-6):\n args = _cast_if_autocast_enabled(input, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedLayerNormFunction.apply(*args)\n\n\ndef mixed_dtype_fused_layer_norm_affine(\n input, weight, bias, normalized_shape, eps=1e-6\n):\n args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedLayerNormAffineMixedDtypesFunction.apply(*args)\n\n\ndef fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):\n args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedRMSNormAffineFunction.apply(*args)\n\n\ndef fused_rms_norm(input, normalized_shape, eps=1e-6):\n args = _cast_if_autocast_enabled(input, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedRMSNormFunction.apply(*args)\n\n\ndef mixed_dtype_fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):\n args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)\n with torch.cuda.amp.autocast(enabled=False):\n return FusedRMSNormAffineMixedDtypesFunction.apply(*args)\n\n\nclass FusedLayerNorm(torch.nn.Module):\n r\"\"\"Applies Layer Normalization over a mini-batch of inputs as described in\n the paper `Layer Normalization`_ .\n Currently only runs on cuda() tensors.\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta\n The mean and standard-deviation are calculated separately over the last\n certain number dimensions which have to be of the shape specified by\n :attr:`normalized_shape`.\n :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of\n :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.\n .. 
note::\n Unlike Batch Normalization and Instance Normalization, which applies\n scalar scale and bias for each entire channel/plane with the\n :attr:`affine` option, Layer Normalization applies per-element scale and\n bias with :attr:`elementwise_affine`.\n This layer uses statistics computed from input data in both training and\n evaluation modes.\n Args:\n normalized_shape (int or list or torch.Size): input shape from an expected input\n of size\n .. math::\n [* \\times \\text{normalized}\\_\\text{shape}[0] \\times \\text{normalized}\\_\\text{shape}[1]\n \\times \\ldots \\times \\text{normalized}\\_\\text{shape}[-1]]\n If a single integer is used, it is treated as a singleton list, and this module will\n normalize over the last dimension which is expected to be of that specific size.\n eps: a value added to the denominator for numerical stability. Default: 1e-5\n elementwise_affine: a boolean value that when set to ``True``, this module\n has learnable per-element affine parameters initialized to ones (for weights)\n and zeros (for biases). Default: ``True``.\n Shape:\n - Input: :math:`(N, *)`\n - Output: :math:`(N, *)` (same shape as input)\n Examples::\n >>> input = torch.randn(20, 5, 10, 10)\n >>> # With Learnable Parameters\n >>> m = apex.normalization.FusedLayerNorm(input.size()[1:])\n >>> # Without Learnable Parameters\n >>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False)\n >>> # Normalize over last two dimensions\n >>> m = apex.normalization.FusedLayerNorm([10, 10])\n >>> # Normalize over last dimension of size 10\n >>> m = apex.normalization.FusedLayerNorm(10)\n >>> # Activating the module\n >>> output = m(input)\n .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450\n \"\"\"\n\n def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):\n super().__init__()\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = (normalized_shape,)\n self.normalized_shape = torch.Size(normalized_shape)\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n if self.elementwise_affine:\n self.weight = Parameter(torch.Tensor(*normalized_shape))\n self.bias = Parameter(torch.Tensor(*normalized_shape))\n else:\n self.register_parameter(\"weight\", None)\n self.register_parameter(\"bias\", None)\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.elementwise_affine:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n if not input.is_cuda:\n return F.layer_norm(\n input, self.normalized_shape, self.weight, self.bias, self.eps\n )\n if self.elementwise_affine:\n return fused_layer_norm_affine(\n input, self.weight, self.bias, self.normalized_shape, self.eps\n )\n else:\n return fused_layer_norm(input, self.normalized_shape, self.eps)\n\n def extra_repr(self):\n return (\n \"{normalized_shape}, eps={eps}, \"\n \"elementwise_affine={elementwise_affine}\".format(**self.__dict__)\n )\n\n\nclass FusedRMSNorm(torch.nn.Module):\n r\"\"\"Applies RMS Normalization over a mini-batch of inputs\n Currently only runs on cuda() tensors.\n .. math::\n y = \\frac{x}{\\mathrm{RMS}[x]} * \\gamma\n The root-mean-square is calculated separately over the last\n certain number dimensions which have to be of the shape specified by\n :attr:`normalized_shape`.\n :math:`\\gamma` is a learnable affine transform parameter of\n :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.\n `epsilon` is added to the mean-square, then the root of the sum is taken.\n .. 
note::\n Unlike Batch Normalization and Instance Normalization, which applies\n scalar scale and bias for each entire channel/plane with the\n :attr:`affine` option, RMS Normalization applies per-element scale\n with :attr:`elementwise_affine`.\n This layer uses statistics computed from input data in both training and\n evaluation modes.\n Args:\n normalized_shape (int or list or torch.Size): input shape from an expected input\n of size\n .. math::\n [* \\times \\text{normalized}\\_\\text{shape}[0] \\times \\text{normalized}\\_\\text{shape}[1]\n \\times \\ldots \\times \\text{normalized}\\_\\text{shape}[-1]]\n If a single integer is used, it is treated as a singleton list, and this module will\n normalize over the last dimension which is expected to be of that specific size.\n eps: a value added to the denominator for numerical stability. Default: 1e-5\n elementwise_affine: a boolean value that when set to ``True``, this module\n has learnable per-element affine parameters initialized to ones (for weights)\n and zeros (for biases). Default: ``True``.\n Shape:\n - Input: :math:`(N, *)`\n - Output: :math:`(N, *)` (same shape as input)\n Examples::\n >>> input = torch.randn(20, 5, 10, 10)\n >>> # With Learnable Parameters\n >>> m = apex.normalization.FusedRMSNorm(input.size()[1:])\n >>> # Without Learnable Parameters\n >>> m = apex.normalization.FusedRMSNorm(input.size()[1:], elementwise_affine=False)\n >>> # Normalize over last two dimensions\n >>> m = apex.normalization.FusedRMSNorm([10, 10])\n >>> # Normalize over last dimension of size 10\n >>> m = apex.normalization.FusedRMSNorm(10)\n >>> # Activating the module\n >>> output = m(input)\n .. _`Root Mean Square Layer Normalization`: https://arxiv.org/pdf/1910.07467.pdf\n \"\"\"\n\n def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):\n super().__init__()\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = (normalized_shape,)\n self.normalized_shape = torch.Size(normalized_shape)\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n if self.elementwise_affine:\n self.weight = Parameter(torch.Tensor(*normalized_shape))\n else:\n self.register_parameter(\"weight\", None)\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.elementwise_affine:\n init.ones_(self.weight)\n\n def manual_rms_norm(self, input, normalized_shape, weight, eps):\n # layer norm should always be calculated in float32\n dims = tuple(i for i in range(-1, -len(normalized_shape) - 1, -1))\n variance = input.to(torch.float32).pow(2).mean(dims, keepdim=True)\n input = input * torch.rsqrt(variance + eps)\n\n if weight is None:\n return input\n\n # convert into half-precision if necessary\n if weight.dtype in [torch.float16, torch.bfloat16]:\n input = input.to(self.weight.dtype)\n\n return weight * input\n\n def forward(self, input):\n if not input.is_cuda:\n return self.manual_rms_norm(\n input, self.normalized_shape, self.weight, self.eps\n )\n\n if self.elementwise_affine:\n return fused_rms_norm_affine(\n input, self.weight, self.normalized_shape, self.eps\n )\n else:\n return fused_rms_norm(input, self.normalized_shape, self.eps)\n\n def extra_repr(self):\n return (\n \"{normalized_shape}, eps={eps}, \"\n \"elementwise_affine={elementwise_affine}\".format(**self.__dict__)\n )\n\n\n# NOTE (mkozuki): Why \"mixed\"?\n# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype\n# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's 
dtype.\n# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in \"csrc/layer_norm_cuda.cpp\"\nclass MixedFusedLayerNorm(FusedLayerNorm):\n def __init__(self, normalized_shape, eps=1e-5, **kwargs):\n if \"elementwise_affine\" in kwargs:\n import warnings\n\n warnings.warn(\n \"MixedFusedLayerNorm does not support `elementwise_affine` argument\"\n )\n elementwise_affine = kwargs.pop(\"elementwise_affine\")\n if not elementwise_affine:\n raise RuntimeError(\n \"MixedFusedLayerNorm does not support `elementwise_affine = False`\"\n )\n\n super().__init__(\n normalized_shape=normalized_shape, eps=eps, elementwise_affine=True\n )\n\n def forward(self, input: torch.Tensor):\n # NOTE (mkozuki): CPU path is here mainly for unittest sake.\n if not input.is_cuda:\n return F.layer_norm(\n input, self.normalized_shape, self.weight, self.bias, self.eps\n )\n return mixed_dtype_fused_layer_norm_affine(\n input, self.weight, self.bias, self.normalized_shape, self.eps\n )\n\n\n# Reference implementation from Huggingface\n\n\n# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype\n# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's dtype.\n# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in \"csrc/layer_norm_cuda.cpp\"\nclass MixedFusedRMSNorm(FusedRMSNorm):\n def __init__(self, normalized_shape, eps=1e-5, **kwargs):\n if \"elementwise_affine\" in kwargs:\n import warnings\n\n warnings.warn(\n \"MixedFusedRMSNorm does not support `elementwise_affine` argument\"\n )\n elementwise_affine = kwargs.pop(\"elementwise_affine\")\n if not elementwise_affine:\n raise RuntimeError(\n \"MixedFusedRMSNorm does not support `elementwise_affine = False`\"\n )\n\n super().__init__(\n normalized_shape=normalized_shape, eps=eps, elementwise_affine=True\n )\n\n def manual_rms_norm(self, input, normalized_shape, weight, eps):\n # layer norm should always be calculated in float32\n dims = tuple(i for i in range(-1, -len(normalized_shape) - 1, -1))\n variance = input.to(torch.float32).pow(2).mean(dims, keepdim=True)\n input = input * torch.rsqrt(variance + eps)\n\n if weight is None:\n return input\n\n # convert into half-precision if necessary\n if weight.dtype in [torch.float16, torch.bfloat16]:\n input = input.to(self.weight.dtype)\n\n return weight * input\n\n def forward(self, input: torch.Tensor):\n # NOTE (mkozuki): CPU path is here mainly for unittest sake.\n # TODO Manual RMS Norm Implementation Here\n if not input.is_cuda:\n return self.manual_rms_norm(\n input, self.normalized_shape, self.weight, self.eps\n )\n return mixed_dtype_fused_rms_norm_affine(\n input, self.weight, self.normalized_shape, self.eps\n )\n" ]
[ [ "torch.Size", "torch.nn.functional.layer_norm", "torch.is_autocast_enabled", "torch.get_autocast_gpu_dtype", "torch.nn.init.ones_", "torch.rsqrt", "torch.cuda.amp.autocast", "torch.cuda.is_bf16_supported", "torch.nn.init.zeros_", "torch.Tensor" ] ]
DataCanvasIO/YLearn
[ "d65b5afb83deed154c710de9096317165d95014a" ]
[ "tests/metalearner_test.py" ]
[ "from itertools import product\n\nimport pytest\nfrom sklearn import clone\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.linear_model import LinearRegression\n\nfrom ylearn.estimator_model.meta_learner import SLearner, TLearner, XLearner\nfrom . import _dgp\nfrom ._common import validate_leaner\n\n_test_settings = {\n # data_generator: model\n _dgp.generate_data_x1b_y1: GradientBoostingRegressor(),\n _dgp.generate_data_x1b_y2: LinearRegression(),\n _dgp.generate_data_x1m_y1: GradientBoostingRegressor(),\n _dgp.generate_data_x1b_y1_w5v0: GradientBoostingRegressor(),\n _dgp.generate_data_x1b_y2_w5v0: LinearRegression(),\n _dgp.generate_data_x1m_y1_w5v0: GradientBoostingRegressor(),\n}\n\n_test_settings_x2b = {\n # data_generator: model\n _dgp.generate_data_x2b_y1: GradientBoostingRegressor(),\n _dgp.generate_data_x2b_y2: LinearRegression(),\n _dgp.generate_data_x2b_y1_w5v0: GradientBoostingRegressor(),\n _dgp.generate_data_x2b_y2_w5v0: LinearRegression(),\n # _dgp.generate_data_x2b_y2_w5v0: MultiTaskLasso(),\n _dgp.generate_data_x2mb_y1: GradientBoostingRegressor(),\n}\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_sleaner(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, SLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_sleaner_with_treat(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, SLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=1, control=0),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings_x2b.keys(), [True, False]))\ndef test_sleaner_x2b(dg, combined):\n model = _test_settings_x2b[dg]\n validate_leaner(dg, SLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=[1, 1], control=[0, 0]),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\ndef test_sleaner_with_treat_control():\n dg = _dgp.generate_data_x2b_y1\n model = GradientBoostingRegressor()\n validate_leaner(dg,\n TLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=True, treat=[1, 1], control=[0, 0]),\n )\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_tlearner(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, TLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_tlearner_with_treat(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, TLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=1, control=0),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings_x2b.keys(), [True, False]))\ndef test_tlearner(dg, combined):\n model = _test_settings_x2b[dg]\n validate_leaner(dg, TLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=[1, 1], control=[0, 0]),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_xleaner(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, XLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined),\n check_effect=dg.__name__.find('y2') < 0,\n 
)\n\n\[email protected]('dg,combined', product(_test_settings.keys(), [True, False]))\ndef test_xleaner_with_treat(dg, combined):\n model = _test_settings[dg]\n validate_leaner(dg, XLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=1, control=0),\n check_effect=dg.__name__.find('y2') < 0,\n )\n\n\[email protected]('dg,combined', product(_test_settings_x2b.keys(), [True, False]))\ndef test_xleaner_x2b(dg, combined):\n model = _test_settings_x2b[dg]\n validate_leaner(dg, XLearner(model=clone(model)),\n fit_kwargs=dict(combined_treatment=combined, treat=[1, 1], control=[0, 0]),\n check_effect=dg.__name__.find('y2') < 0,\n )\n" ]
[ [ "sklearn.clone", "sklearn.linear_model.LinearRegression", "sklearn.ensemble.GradientBoostingRegressor" ] ]
stxxllbu/CS224n-winter-together
[ "eae158ed8e88dc7c8638e25bac4c4fc8eeddcc8c" ]
[ "Assignments/assignment4/BobOfRivia/run.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2019-20: Homework 4\nrun.py: Run Script for Simple NMT Model\nPencheng Yin <[email protected]>\nSahil Chopra <[email protected]>\nVera Lin <[email protected]>\n\nUsage:\n run.py train --train-src=<file> --train-tgt=<file> --dev-src=<file> --dev-tgt=<file> --vocab=<file> [options]\n run.py decode [options] MODEL_PATH TEST_SOURCE_FILE OUTPUT_FILE\n run.py decode [options] MODEL_PATH TEST_SOURCE_FILE TEST_TARGET_FILE OUTPUT_FILE\n\nOptions:\n -h --help show this screen.\n --cuda use GPU\n --train-src=<file> train source file\n --train-tgt=<file> train target file\n --dev-src=<file> dev source file\n --dev-tgt=<file> dev target file\n --vocab=<file> vocab file\n --seed=<int> seed [default: 0]\n --batch-size=<int> batch size [default: 32]\n --embed-size=<int> embedding size [default: 256]\n --hidden-size=<int> hidden size [default: 256]\n --clip-grad=<float> gradient clipping [default: 5.0]\n --log-every=<int> log every [default: 10]\n --max-epoch=<int> max epoch [default: 30]\n --input-feed use input feeding\n --patience=<int> wait for how many iterations to decay learning rate [default: 5]\n --max-num-trial=<int> terminate training after how many trials [default: 5]\n --lr-decay=<float> learning rate decay [default: 0.5]\n --beam-size=<int> beam size [default: 5]\n --sample-size=<int> sample size [default: 5]\n --lr=<float> learning rate [default: 0.001]\n --uniform-init=<float> uniformly initialize all parameters [default: 0.1]\n --save-to=<file> model save path [default: model.bin]\n --valid-niter=<int> perform validation after how many iterations [default: 2000]\n --dropout=<float> dropout [default: 0.3]\n --max-decoding-time-step=<int> maximum number of decoding time steps [default: 70]\n\"\"\"\nimport math\nimport sys\nimport pickle\nimport time\n\n\nfrom docopt import docopt\nfrom nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction\nfrom nmt_model import Hypothesis, NMT\nimport numpy as np\nfrom typing import List, Tuple, Dict, Set, Union\nfrom tqdm import tqdm\nfrom utils import read_corpus, batch_iter\nfrom vocab import Vocab, VocabEntry\n\nimport torch\nimport torch.nn.utils\n\n\ndef evaluate_ppl(model, dev_data, batch_size=32):\n \"\"\" Evaluate perplexity on dev sentences\n @param model (NMT): NMT Model\n @param dev_data (list of (src_sent, tgt_sent)): list of tuples containing source and target sentence\n @param batch_size (batch size)\n @returns ppl (perplixty on dev sentences)\n \"\"\"\n was_training = model.training\n model.eval()\n\n cum_loss = 0.\n cum_tgt_words = 0.\n\n # no_grad() signals backend to throw away all gradients\n with torch.no_grad():\n for src_sents, tgt_sents in batch_iter(dev_data, batch_size):\n loss = -model(src_sents, tgt_sents).sum()\n\n cum_loss += loss.item()\n tgt_word_num_to_predict = sum(len(s[1:]) for s in tgt_sents) # omitting leading `<s>`\n cum_tgt_words += tgt_word_num_to_predict\n\n ppl = np.exp(cum_loss / cum_tgt_words)\n\n if was_training:\n model.train()\n\n return ppl\n\n\ndef compute_corpus_level_bleu_score(references: List[List[str]], hypotheses: List[Hypothesis]) -> float:\n \"\"\" Given decoding results and reference sentences, compute corpus-level BLEU score.\n @param references (List[List[str]]): a list of gold-standard reference target sentences\n @param hypotheses (List[Hypothesis]): a list of hypotheses, one for each reference\n @returns bleu_score: corpus-level BLEU score\n \"\"\"\n if references[0][0] == '<s>':\n references = 
[ref[1:-1] for ref in references]\n bleu_score = corpus_bleu([[ref] for ref in references],\n [hyp.value for hyp in hypotheses])\n return bleu_score\n\n\ndef train(args: Dict):\n \"\"\" Train the NMT Model.\n @param args (Dict): args from cmd line\n \"\"\"\n # args = {\n # '--train-src':'./en_es_data/train.es',\n # '--train-tgt':'./en_es_data/train.en',\n # '--dev-src':'./en_es_data/dev.es',\n # '--dev-tgt':'./en_es_data/dev.en',\n # '--vocab':'vocab.json'\n # }\n\n train_data_src = read_corpus(args['--train-src'], source='src')\n train_data_tgt = read_corpus(args['--train-tgt'], source='tgt')\n\n dev_data_src = read_corpus(args['--dev-src'], source='src')\n dev_data_tgt = read_corpus(args['--dev-tgt'], source='tgt')\n\n train_data = list(zip(train_data_src, train_data_tgt))\n dev_data = list(zip(dev_data_src, dev_data_tgt))\n\n train_batch_size = int(args['--batch-size'])\n clip_grad = float(args['--clip-grad'])\n valid_niter = int(args['--valid-niter'])\n log_every = int(args['--log-every'])\n model_save_path = args['--save-to']\n\n vocab = Vocab.load(args['--vocab'])\n\n model = NMT(embed_size=int(args['--embed-size']),\n hidden_size=int(args['--hidden-size']),\n dropout_rate=float(args['--dropout']),\n vocab=vocab)\n model.train()\n\n uniform_init = float(args['--uniform-init'])\n if np.abs(uniform_init) > 0.:\n print('uniformly initialize parameters [-%f, +%f]' % (uniform_init, uniform_init), file=sys.stderr)\n for p in model.parameters():\n p.data.uniform_(-uniform_init, uniform_init)\n\n vocab_mask = torch.ones(len(vocab.tgt))\n vocab_mask[vocab.tgt['<pad>']] = 0\n\n device = torch.device(\"cuda:0\" if args['--cuda'] else \"cpu\")\n print('use device: %s' % device, file=sys.stderr)\n\n model = model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=float(args['--lr']))\n\n num_trial = 0\n train_iter = patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0\n cum_examples = report_examples = epoch = valid_num = 0\n hist_valid_scores = []\n train_time = begin_time = time.time()\n print('begin Maximum Likelihood training')\n\n while True:\n epoch += 1\n\n for src_sents, tgt_sents in batch_iter(train_data, batch_size=train_batch_size, shuffle=True):\n train_iter += 1\n\n optimizer.zero_grad()\n\n batch_size = len(src_sents)\n\n example_losses = -model(src_sents, tgt_sents) # (batch_size,)\n batch_loss = example_losses.sum()\n loss = batch_loss / batch_size\n\n loss.backward()\n\n # clip gradient\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)\n\n optimizer.step()\n\n batch_losses_val = batch_loss.item()\n report_loss += batch_losses_val\n cum_loss += batch_losses_val\n\n tgt_words_num_to_predict = sum(len(s[1:]) for s in tgt_sents) # omitting leading `<s>`\n report_tgt_words += tgt_words_num_to_predict\n cum_tgt_words += tgt_words_num_to_predict\n report_examples += batch_size\n cum_examples += batch_size\n\n if train_iter % log_every == 0:\n print('epoch %d, iter %d, avg. loss %.2f, avg. ppl %.2f ' \\\n 'cum. examples %d, speed %.2f words/sec, time elapsed %.2f sec' % (epoch, train_iter,\n report_loss / report_examples,\n math.exp(report_loss / report_tgt_words),\n cum_examples,\n report_tgt_words / (time.time() - train_time),\n time.time() - begin_time), file=sys.stderr)\n\n train_time = time.time()\n report_loss = report_tgt_words = report_examples = 0.\n\n # perform validation\n if train_iter % valid_niter == 0:\n print('epoch %d, iter %d, cum. loss %.2f, cum. ppl %.2f cum. 
examples %d' % (epoch, train_iter,\n cum_loss / cum_examples,\n np.exp(cum_loss / cum_tgt_words),\n cum_examples), file=sys.stderr)\n\n cum_loss = cum_examples = cum_tgt_words = 0.\n valid_num += 1\n\n print('begin validation ...', file=sys.stderr)\n\n # compute dev. ppl and bleu\n dev_ppl = evaluate_ppl(model, dev_data, batch_size=128) # dev batch size can be a bit larger\n valid_metric = -dev_ppl\n\n print('validation: iter %d, dev. ppl %f' % (train_iter, dev_ppl), file=sys.stderr)\n\n is_better = len(hist_valid_scores) == 0 or valid_metric > max(hist_valid_scores)\n hist_valid_scores.append(valid_metric)\n\n if is_better:\n patience = 0\n print('save currently the best model to [%s]' % model_save_path, file=sys.stderr)\n model.save(model_save_path)\n\n # also save the optimizers' state\n torch.save(optimizer.state_dict(), model_save_path + '.optim')\n elif patience < int(args['--patience']):\n patience += 1\n print('hit patience %d' % patience, file=sys.stderr)\n\n if patience == int(args['--patience']):\n num_trial += 1\n print('hit #%d trial' % num_trial, file=sys.stderr)\n if num_trial == int(args['--max-num-trial']):\n print('early stop!', file=sys.stderr)\n exit(0)\n\n # decay lr, and restore from previously best checkpoint\n lr = optimizer.param_groups[0]['lr'] * float(args['--lr-decay'])\n print('load previously best model and decay learning rate to %f' % lr, file=sys.stderr)\n\n # load model\n params = torch.load(model_save_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(params['state_dict'])\n model = model.to(device)\n\n print('restore parameters of the optimizers', file=sys.stderr)\n optimizer.load_state_dict(torch.load(model_save_path + '.optim'))\n\n # set new lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # reset patience\n patience = 0\n\n if epoch == int(args['--max-epoch']):\n print('reached maximum number of epochs!', file=sys.stderr)\n exit(0)\n\n\ndef decode(args: Dict[str, str]):\n \"\"\" Performs decoding on a test set, and save the best-scoring decoding results.\n If the target gold-standard sentences are given, the function also computes\n corpus-level BLEU score.\n @param args (Dict): args from cmd line\n \"\"\"\n\n print(\"load test source sentences from [{}]\".format(args['TEST_SOURCE_FILE']), file=sys.stderr)\n test_data_src = read_corpus(args['TEST_SOURCE_FILE'], source='src')\n if args['TEST_TARGET_FILE']:\n print(\"load test target sentences from [{}]\".format(args['TEST_TARGET_FILE']), file=sys.stderr)\n test_data_tgt = read_corpus(args['TEST_TARGET_FILE'], source='tgt')\n\n print(\"load model from {}\".format(args['MODEL_PATH']), file=sys.stderr)\n model = NMT.load(args['MODEL_PATH'])\n\n if args['--cuda']:\n model = model.to(torch.device(\"cuda:0\"))\n\n hypotheses = beam_search(model, test_data_src,\n beam_size=int(args['--beam-size']),\n max_decoding_time_step=int(args['--max-decoding-time-step']))\n\n if args['TEST_TARGET_FILE']:\n top_hypotheses = [hyps[0] for hyps in hypotheses]\n bleu_score = compute_corpus_level_bleu_score(test_data_tgt, top_hypotheses)\n print('Corpus BLEU: {}'.format(bleu_score * 100), file=sys.stderr)\n\n with open(args['OUTPUT_FILE'], 'w') as f:\n for src_sent, hyps in zip(test_data_src, hypotheses):\n top_hyp = hyps[0]\n hyp_sent = ' '.join(top_hyp.value)\n f.write(hyp_sent + '\\n')\n\n\ndef beam_search(model: NMT, test_data_src: List[List[str]], beam_size: int, max_decoding_time_step: int) -> List[List[Hypothesis]]:\n \"\"\" Run beam search to construct hypotheses for a 
list of src-language sentences.\n @param model (NMT): NMT Model\n @param test_data_src (List[List[str]]): List of sentences (words) in source language, from test set.\n @param beam_size (int): beam_size (# of hypotheses to hold for a translation at every step)\n @param max_decoding_time_step (int): maximum sentence length that Beam search can produce\n @returns hypotheses (List[List[Hypothesis]]): List of Hypothesis translations for every source sentence.\n \"\"\"\n was_training = model.training\n model.eval()\n\n hypotheses = []\n with torch.no_grad():\n for src_sent in tqdm(test_data_src, desc='Decoding', file=sys.stdout):\n example_hyps = model.beam_search(src_sent, beam_size=beam_size, max_decoding_time_step=max_decoding_time_step)\n\n hypotheses.append(example_hyps)\n\n if was_training: model.train(was_training)\n\n return hypotheses\n\n\ndef main():\n \"\"\" Main func.\n \"\"\"\n\n args = docopt(__doc__)\n\n # Check pytorch version\n assert(torch.__version__ >= \"1.0.0\"), \"Please update your installation of PyTorch. You have {} and you should have version 1.0.0\".format(torch.__version__)\n\n # seed the random number generators\n seed = int(args['--seed'])\n torch.manual_seed(seed)\n if args['--cuda']:\n torch.cuda.manual_seed(seed)\n np.random.seed(seed * 13 // 7)\n\n if args['train']:\n train(args)\n elif args['decode']:\n decode(args)\n else:\n raise RuntimeError('invalid run mode')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load", "torch.cuda.manual_seed", "torch.manual_seed", "torch.no_grad", "numpy.random.seed", "numpy.abs", "numpy.exp", "torch.device" ] ]
google-research/understanding-transfer-learning
[ "0e4df444f342784514d91028d0de332103343a94" ]
[ "third_party/fixup_resnet/fixup_resnet_imagenet.py" ]
[ "import torch\nimport torch.nn as nn\nimport numpy as np\n\n\n__all__ = ['FixupResNet', 'fixup_resnet18', 'fixup_resnet34', 'fixup_resnet50', 'fixup_resnet101', 'fixup_resnet152']\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass FixupBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(FixupBasicBlock, self).__init__()\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.bias1a = nn.Parameter(torch.zeros(1))\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bias1b = nn.Parameter(torch.zeros(1))\n self.relu = nn.ReLU(inplace=True)\n self.bias2a = nn.Parameter(torch.zeros(1))\n self.conv2 = conv3x3(planes, planes)\n self.scale = nn.Parameter(torch.ones(1))\n self.bias2b = nn.Parameter(torch.zeros(1))\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x + self.bias1a)\n out = self.relu(out + self.bias1b)\n\n out = self.conv2(out + self.bias2a)\n out = out * self.scale + self.bias2b\n\n if self.downsample is not None:\n identity = self.downsample(x + self.bias1a)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass FixupBottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(FixupBottleneck, self).__init__()\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.bias1a = nn.Parameter(torch.zeros(1))\n self.conv1 = conv1x1(inplanes, planes)\n self.bias1b = nn.Parameter(torch.zeros(1))\n self.bias2a = nn.Parameter(torch.zeros(1))\n self.conv2 = conv3x3(planes, planes, stride)\n self.bias2b = nn.Parameter(torch.zeros(1))\n self.bias3a = nn.Parameter(torch.zeros(1))\n self.conv3 = conv1x1(planes, planes * self.expansion)\n self.scale = nn.Parameter(torch.ones(1))\n self.bias3b = nn.Parameter(torch.zeros(1))\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x + self.bias1a)\n out = self.relu(out + self.bias1b)\n\n out = self.conv2(out + self.bias2a)\n out = self.relu(out + self.bias2b)\n\n out = self.conv3(out + self.bias3a)\n out = out * self.scale + self.bias3b\n\n if self.downsample is not None:\n identity = self.downsample(x + self.bias1a)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass FixupResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_fc_init=True):\n super(FixupResNet, self).__init__()\n self.num_layers = sum(layers)\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bias1 = nn.Parameter(torch.zeros(1))\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.bias2 = nn.Parameter(torch.zeros(1))\n self.fc = nn.Linear(512 * block.expansion, 
num_classes)\n\n for m in self.modules():\n if isinstance(m, FixupBasicBlock):\n nn.init.normal_(m.conv1.weight, mean=0, std=np.sqrt(2 / (m.conv1.weight.shape[0] * np.prod(m.conv1.weight.shape[2:]))) * self.num_layers ** (-0.5))\n nn.init.constant_(m.conv2.weight, 0)\n if m.downsample is not None:\n nn.init.normal_(m.downsample.weight, mean=0, std=np.sqrt(2 / (m.downsample.weight.shape[0] * np.prod(m.downsample.weight.shape[2:]))))\n elif isinstance(m, FixupBottleneck):\n nn.init.normal_(m.conv1.weight, mean=0, std=np.sqrt(2 / (m.conv1.weight.shape[0] * np.prod(m.conv1.weight.shape[2:]))) * self.num_layers ** (-0.25))\n nn.init.normal_(m.conv2.weight, mean=0, std=np.sqrt(2 / (m.conv2.weight.shape[0] * np.prod(m.conv2.weight.shape[2:]))) * self.num_layers ** (-0.25))\n nn.init.constant_(m.conv3.weight, 0)\n if m.downsample is not None:\n nn.init.normal_(m.downsample.weight, mean=0, std=np.sqrt(2 / (m.downsample.weight.shape[0] * np.prod(m.downsample.weight.shape[2:]))))\n elif isinstance(m, nn.Linear) and zero_fc_init:\n nn.init.constant_(m.weight, 0)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = conv1x1(self.inplanes, planes * block.expansion, stride)\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.relu(x + self.bias1)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x + self.bias2)\n\n return x\n\n\ndef fixup_resnet18(**kwargs):\n \"\"\"Constructs a Fixup-ResNet-18 model.\n\n \"\"\"\n model = FixupResNet(FixupBasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef fixup_resnet34(**kwargs):\n \"\"\"Constructs a Fixup-ResNet-34 model.\n\n \"\"\"\n model = FixupResNet(FixupBasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef fixup_resnet50(**kwargs):\n \"\"\"Constructs a Fixup-ResNet-50 model.\n\n \"\"\"\n model = FixupResNet(FixupBottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef fixup_resnet101(**kwargs):\n \"\"\"Constructs a Fixup-ResNet-101 model.\n\n \"\"\"\n model = FixupResNet(FixupBottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef fixup_resnet152(**kwargs):\n \"\"\"Constructs a Fixup-ResNet-152 model.\n\n \"\"\"\n model = FixupResNet(FixupBottleneck, [3, 8, 36, 3], **kwargs)\n return model\n" ]
[ [ "torch.nn.MaxPool2d", "torch.ones", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "numpy.prod", "torch.zeros", "torch.nn.ReLU" ] ]
Lalala-xnk/Quasi-Attention-ABSA
[ "1eb694e832bbf9687aed719bbcaa657baf0323d3" ]
[ "code/scorer.py" ]
[ "import pandas as pd\nfrom tqdm import tqdm\nfrom model.QACGBERT import *\nfrom util.tokenization import *\nfrom torch.utils.data import DataLoader, TensorDataset\nimport random\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\ncontext_id_map_fiqa = {'stock': 0,\n 'corporate': 1,\n 'market': 2,\n 'economy': 3}\n\n\nclass InputExample(object):\n def __init__(self, guid, text_a, text_b=None, label=None):\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n def __init__(self, input_ids, input_mask, segment_ids, score, seq_len, context_ids):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.score = score\n self.seq_len = seq_len\n self.context_ids = context_ids\n\n\ndef convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python 3\")\n\n\ndef get_test_examples(path):\n test_data = pd.read_csv(path, header=None).values\n def _create_examples(lines, set_type):\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(str(line[2]))\n text_b = convert_to_unicode(str(line[1]))\n label = float(0)\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n return _create_examples(test_data, \"test\")\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_length):\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef convert_examples_to_features(examples, max_seq_length,\n tokenizer, max_context_length,\n context_standalone, args):\n features = []\n for (ex_index, example) in enumerate(tqdm(examples)):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n tokens_context = None\n if example.text_b:\n tokens_context = tokenizer.tokenize(example.text_b)\n\n if tokens_b and not context_standalone:\n truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b and not context_standalone:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n context_ids = []\n if tokens_context:\n context_ids = [context_id_map_fiqa[example.text_b]]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n seq_len = len(input_ids)\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n while len(context_ids) < max_context_length:\n context_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(context_ids) == max_context_length\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n score=example.label,\n seq_len=seq_len,\n context_ids=context_ids))\n\n return features\n\n\ndef get_model_and_tokenizer(vocab_file,\n bert_config_file=None, init_checkpoint=None,\n do_lower_case=True,\n init_lrp=False):\n tokenizer = FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case, pretrain=False)\n if bert_config_file is not None:\n bert_config = BertConfig.from_json_file(bert_config_file)\n else:\n bert_config = BertConfig(\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02\n )\n bert_config.vocab_size = len(tokenizer.vocab)\n model = QACGBertForSequenceScore(\n bert_config,\n init_weight=True,\n init_lrp=init_lrp)\n\n if init_checkpoint is not None:\n if \"checkpoint\" in init_checkpoint:\n state_dict = torch.load(init_checkpoint, map_location='cpu')\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n if k.startswith('module.'):\n name = k[7:]\n new_state_dict[name] = v\n else:\n new_state_dict[k] = v\n model.load_state_dict(new_state_dict)\n else:\n model.bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'), strict=False)\n return model, tokenizer\n\n\ndef system_setups(args):\n # system related setups\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n torch.distributed.init_process_group(backend='nccl')\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if args.bert_config_file is not None:\n bert_config = BertConfig.from_json_file(args.bert_config_file)\n if args.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length {} because the BERT model was only trained up to sequence length {}\".format(\n args.max_seq_length, bert_config.max_position_embeddings))\n\n return device, n_gpu\n\n\ndef data_and_model_loader(device, n_gpu, args):\n model, tokenizer = get_model_and_tokenizer(vocab_file=args.vocab_file,\n bert_config_file=args.bert_config_file, init_checkpoint=args.init_checkpoint,\n do_lower_case=True,\n init_lrp=False)\n\n test_examples = get_test_examples(args.path)\n test_features = convert_examples_to_features(\n test_examples, args.max_seq_length,\n tokenizer, args.max_context_length,\n args.context_standalone, args)\n\n all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids 
for f in test_features], dtype=torch.long)\n all_score = torch.tensor([f.score for f in test_features], dtype=torch.float)\n all_seq_len = torch.tensor([[f.seq_len] for f in test_features], dtype=torch.long)\n all_context_ids = torch.tensor([f.context_ids for f in test_features], dtype=torch.long)\n\n test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n all_score, all_seq_len, all_context_ids)\n test_dataloader = DataLoader(test_data, shuffle=False)\n\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n model.to(device)\n\n return model, test_dataloader\n\n\ndef pred(args):\n device, n_gpu = system_setups(args)\n model, test_dataloader = data_and_model_loader(device, n_gpu, args)\n\n model.eval()\n y_pred = []\n for batch in list(test_dataloader):\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n input_ids, input_mask, segment_ids, score, seq_lens, \\\n context_ids = batch\n max_seq_lens = max(seq_lens)[0]\n input_ids = input_ids[:, :max_seq_lens]\n input_mask = input_mask[:, :max_seq_lens]\n segment_ids = segment_ids[:, :max_seq_lens]\n\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n score = score.to(device)\n seq_lens = seq_lens.to(device)\n context_ids = context_ids.to(device)\n\n _, pred_score, _, _, _, _ = \\\n model(input_ids, segment_ids, input_mask, seq_lens, device=device, labels=score,\n context_ids=context_ids)\n # move to CPU before converting to numpy, so the GPU path does not crash:\n y_pred.append(pred_score.detach().cpu().numpy()[0][0])\n\n return y_pred\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--path\")\n parser.add_argument(\"--max_seq_length\", default=128, type=int)\n parser.add_argument(\"--vocab_file\")\n parser.add_argument(\"--bert_config_file\")\n parser.add_argument(\"--init_checkpoint\")\n parser.add_argument('--local_rank', type=int, default=-1)\n parser.add_argument(\"--no_cuda\", default=False, action='store_true')\n parser.add_argument(\"--max_context_length\", default=1, type=int)\n parser.add_argument(\"--context_standalone\", default=False, action='store_true')\n parser.add_argument('--seed', type=int, default=123)\n args = parser.parse_args()\n\n pred_score = pred(args)\n print(pred_score)\n" ]
[ [ "pandas.read_csv", "torch.utils.data.DataLoader", "torch.utils.data.TensorDataset" ] ]
bx3/perigee-bandit
[ "73771672abe9321edbb7d455a59bfb072fafa33f" ]
[ "sim/simple_model/loss_functions.py" ]
[ "import torch\nimport sys\n\nCONSTANT = 1e-10\n\nclass ClusterLoss(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.mse_fn = torch.nn.MSELoss(reduction='sum')\n self.softmax_func = torch.nn.Softmax(dim=0)\n\n # def forward(self, X, H, C, M, T, nM):\n # sim_loss = torch.tensor(0.0, dtype=torch.float32)\n # softmax_func = torch.nn.Softmax(dim=0)\n # for i in range(T):\n # row_loss = torch.tensor(0.0, dtype=torch.float32) \n # scores = torch.ones(T) * float(\"inf\")\n # for j in range(T):\n # if i != j:\n # if (M[i] != M[j]).any():\n # selected = torch.masked_select(X[i]-X[j], (M[i]*M[j]) >0)\n # scores[j] = torch.sqrt(torch.var(selected))\n\n # topk, topk_ind = torch.topk(scores, 1, largest=False)\n # weight = softmax_func(-1*topk)\n\n # for k in range(len(topk_ind)):\n # j = topk_ind[k]\n # row_loss += weight[k]*torch.norm(H[i]-H[j])\n # sim_loss += row_loss\n # loss = self.mse_fn(X*M, (H-C)*M) + sim_loss + 0.1*torch.norm(C)\n # return loss\n\n def forward(self, X, H, C, M, T, nM, row_scores, mc_rows):\n sim_loss = torch.tensor(0.0, dtype=torch.float32)\n for i in mc_rows:\n topk, topk_ind = torch.topk(row_scores[i], 3, largest=False)\n weight = self.softmax_func(-1*topk)\n\n for k in range(len(topk_ind)):\n j = topk_ind[k]\n sim_loss += weight[k]*torch.norm(H[i]-H[j])\n # print(i, j, H[i], H[j], weight, sim_loss, topk, topk_ind, row_scores[i])\n \n loss = self.mse_fn(X*M, (H-C)*M) + sim_loss + 0.1*torch.norm(C) + 0.1*torch.norm(H)\n return loss\n\n\n\nclass ElementLoss(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, X, A, C, easy_estimates_by_row, known_pos_by_row, element_peer_scores, tensor_ind_map, T):\n sim_loss = torch.tensor(0.0, dtype=torch.float32)\n for i in range(T):\n if i in easy_estimates_by_row:\n row_loss = torch.tensor(0.0, dtype=torch.float32)\n easy_unknown = easy_estimates_by_row[i]\n\n for j in easy_unknown:\n for weight, k, mask in element_peer_scores[(i,j)]:\n\n t = tensor_ind_map[(i,j)]\n a = (X[k] + C[k]) - (X[i] + C[i])\n a[j] -= (A[t] - C[i] ) # without - X[i,j], since it is 0\n b = torch.masked_select(a, mask)\n # print(i,j, 'close to', k, 'weight', weight, mask, a, b)\n # print(X[k]*max_time)\n # print(X[i]*max_time)\n\n row_loss += weight/float(len(easy_unknown))*torch.norm(b)\n\n sim_loss += row_loss\n \n return sim_loss + 1*torch.norm(C) + 1*torch.norm(A)\n\n\n\n# def forward(self, X, A, C, easy_estimates, known_pos_by_row, element_peer_scores, tensor_ind_map, indicators, N):\n # sim_loss = torch.tensor(0.0, dtype=torch.float32)\n # for i,j in easy_estimates:\n # ele_loss = torch.tensor(CONSTANT, dtype=torch.float32)\n # for weight, k in element_peer_scores[(i,j)]:\n # a = (X[k,j] + C[k] - A[tensor_ind_map[(i,j)]]) \n # ele_loss += weight*torch.square(a)\n # sim_loss += torch.sqrt(ele_loss)\n\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.Softmax", "torch.tensor", "torch.norm", "torch.topk", "torch.masked_select" ] ]
instadeepai/EGTA-NMARL
[ "544b2e0e4b5518edefc6819975f9de4573ff434c" ]
[ "egta/envs/tragedy/tests/test_tragedy.py" ]
[ "import torch\n\nfrom ..env import Tragedy\n\ndef test_defaults():\n env = Tragedy(regen_properties={\"type\":\"constant\", \"rate\": 0.075})\n obs = env.reset()\n print(\"start\", obs)\n new_obs, reward, done, _ = env.step(torch.tensor([0,]*4))\n print(\"obs\", new_obs)\n print(\"reward\", reward)\n print(\"done\", done)\n new_obs, reward, done, _ = env.step(torch.tensor([1,]*4))\n print(\"obs\", new_obs)\n print(\"reward\", reward)\n print(\"done\", done)\n # new_obs, reward, done, _ = env.step(torch.tensor([2,]*6))\n # print(\"obs\", new_obs)\n # print(\"reward\", reward)\n # print(\"done\", done)\n\n for _ in range(2):\n new_obs, reward, done, _ = env.step(torch.tensor([0,] + [1,]*3))\n print(\"obs\", new_obs)\n print(\"reward\", reward)\n print(\"done\", done)\n\n # for _ in range(2):\n # new_obs, reward, done, _ = env.step(torch.tensor([2,] + [1,]*5))\n # print(\"obs\", new_obs)\n # print(\"reward\", reward)\n # print(\"done\", done)\n\n for _ in range(2):\n new_obs, reward, done, _ = env.step(torch.tensor([1, 0] + [1,]*2))\n print(\"obs\", new_obs)\n print(\"reward\", reward)\n print(\"done\", done)\n\n for _ in range(2):\n new_obs, reward, done, _ = env.step(torch.tensor([[0,]*3 + [1,]]))\n print(\"obs\", new_obs)\n print(\"reward\", reward)\n print(\"done\", done)\n\n # for _ in range(2):\n # new_obs, reward, done, _ = env.step(torch.tensor([1, 2,] + [1,]*4))\n # print(\"obs\", new_obs)\n # print(\"reward\", reward)\n # print(\"done\", done)\n\n assert False\n\n# def test_defaults_batch_env():\n# env = Tragedy()\n# obs = env.reset()\n# print(\"start\", obs)\n# new_obs, reward, done, _ = env.step(torch.tensor([[0.3, 0.3, 0.2], [0.3, 0.3, 0.4]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# new_obs, reward, done, _ = env.step(torch.tensor([[0.3, 0.3, 0.3], [0.1, 0.1, 0.2]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# new_obs, reward, done, _ = env.step(torch.tensor([[0.33, 0.33, 0.35], [0.34, 0.33, 0.32]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# assert False\n\n# def test_discrete_binary_batch_env():\n# env = Tragedy(num_agents=3, batch_size=2, action_space=\"discrete\")\n# obs = env.reset()\n# print(\"start\", obs)\n# new_obs, reward, done, _ = env.step(torch.tensor([[0, 1, 0], [0, 0, 0]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# new_obs, reward, done, _ = env.step(torch.tensor([[1, 0, 1], [1, 1, 1]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# new_obs, reward, done, _ = env.step(torch.tensor([[1, 1, 0], [0, 0, 0]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n# assert False\n\n# def test_discrete_trinary_batch_env():\n# env = Tragedy(num_agents=3, batch_size=2, action_space=\"discrete\")\n# obs = env.reset()\n# print(\"start\", obs)\n\n# for _ in range(6):\n# new_obs, reward, done, _ = env.step(torch.tensor([[0, 0, 0], [0, 0, 0]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n\n# for _ in range(20):\n# new_obs, reward, done, _ = env.step(torch.tensor([[2, 2, 2], [2, 2, 2]]))\n# print(\"obs\", new_obs)\n# print(\"reward\", reward)\n# print(\"done\", done)\n\n# assert False" ]
[ [ "torch.tensor" ] ]
BYUignite/ODT
[ "291d6ff9ae5813aed3135dc22525c9f0fc99282a" ]
[ "post/coldJet/uucl_rxx0.py" ]
[ "#plots U/UcL vs. r/(x-x0)\n#plot file directory/name at end of function definition\n\nfrom __future__ import division\nimport numpy as np\nfrom data_tools import get_inputFileParameter\nimport matplotlib\nmatplotlib.use('PDF') # or Agg (for png), SVG, PS\nimport matplotlib.pyplot as plt\nfrom data_tools import commentHdr\n\n#-------------------------------------------------------------------------------------\n\ndef uucl_rxx0(DI, profName=\"uvel\"): #use list of cases\n\n cases=[]\n #plotname=\"uucl_rxx0\"\n plotname=\"uucl_rxx0_xD\" #for plotting multiple distances\n \n matplotlib.rcParams.update({'font.size':20, 'figure.autolayout': True}) #, 'font.weight':'bold'})\n\n fig, axL = plt.subplots()\n color = {'coldJet_base':'k', 'coldJet_base_gDens60':'m','coldJet_base_gDens120':'b', 'coldJet_C_10_gDens120':'g', 'coldJet_C_10_ZLES_7_gDens120':'r', 'coldJet_LplanarTau_gDens60':'b--', 'coldJet_ZLES_7_gDens120':'c', 'coldJet__LPlanarTau_C_10_ZLES_7_gDens60':'r--'}\n #color['coldJet_LplanarTau_gDens60']='k' #use if plotting LPlanarTau cases at multiple distances\n #color['coldJet__LPlanarTau_C_10_ZLES_7_gDens60']='k' #use if plotting LPlanarTau cases at multiple distances\n\n for i in range(0,len(DI)):\n\n D = get_inputFileParameter(DI[i], (\"initParams\",\"djeti\"))\n mfile = DI[i]['pdir'] + \"/means_\" + profName + \".dat\"\n data = np.loadtxt(mfile, comments=commentHdr)\n times = get_inputFileParameter(DI[i], (\"dumpTimes\",))\n ua = get_inputFileParameter(DI[i], (\"initParams\",\"vel_min\"))\n\n npts = len(data[:,0])\n ntimes = len(times)\n rnorm = np.empty( (npts, ntimes) )\n U = data[:,1:]\n icl = int(npts/2)+1\n Ucl = U[icl,:]\n \n for j in range(ntimes):\n rnorm[:,j] = data[:,0]/(times[j] - 4.0*D) \n\n #if plotting several cases together, best not to plot multiple downstream distances\n #j = 2; axL.plot(rnorm[:,j], (U[:,j]-ua)/(Ucl[j]-ua), color[DI[i]['cn']]+':') #x/D=10\n #j = 4; axL.plot(rnorm[:,j], (U[:,j]-ua)/(Ucl[j]-ua), color[DI[i]['cn']]+'--') #x/D=20\n j = 10; axL.plot(rnorm[:,j], (U[:,j]-ua)/(Ucl[j]-ua), color[DI[i]['cn']]) #x/D=50\n \n #cases.append(DI[i]['cn'][8:]+', x=10D')\n #cases.append(DI[i]['cn'][8:]+', x=20D')\n cases.append(DI[i]['cn'][8:]+', x=50D')\n plotname+=\"__\"+DI[i]['cn'][8:]\n\n axL.set_ylim([0,1.2])\n axL.set_xlim([-0.25,0.25])\n axL.set_xlabel(r\"$r/(x-x_0)$\", fontsize=22)\n axL.set_ylabel(r\"$v/v_{cL}$\", fontsize=22)\n axL.set_title(\"coldJet\",fontsize=22)\n axL.legend(cases,loc=\"best\", frameon=False, fontsize=8)\n\n #plt.savefig('../../data/plots_coldJet/'+plotname.replace(\".\",\"o\"))\n plt.savefig('../../data/plots_coldJet/'+'uucl_rxx0__ALL'.replace(\".\",\"o\"))\n" ]
[ [ "numpy.empty", "matplotlib.pyplot.subplots", "matplotlib.rcParams.update", "matplotlib.use", "numpy.loadtxt" ] ]
mohantyk/srd
[ "4ca6573226edbde80ea3641f50637c53543e1edc" ]
[ "receiver.py" ]
[ "import numpy as np\nfrom scipy import signal\n\nfrom wavegen import cosine_wave\nfrom utilities import power\n\n#-------------------------\n# Helper functions\ndef combine(iterator, num_elems):\n ''' Pairs up a fixed number of elements at a time '''\n window = [iter(iterator)]*num_elems\n for combo in zip(*window):\n yield combo\n\n#-------------------------\n# Receiver blocks\n\ndef pam2letters(symbols):\n '''\n inputs:\n symbols: list of pam symbols\n outputs:\n msg as a str\n '''\n symbol_to_bits = {3: '11', 1: '10', -1: '01', -3: '00'}\n bits = ''.join(symbol_to_bits[symbol] for symbol in symbols)\n msg = []\n for eight_bits in combine(bits, 8):\n ascii = int(''.join(eight_bits), 2)\n ch = chr(ascii)\n msg.append(ch)\n return ''.join(msg)\n\n\ndef demodulate(sig, fc, Ts, taps=50):\n '''\n Demodulate a carrier wave\n inputs:\n sig: analog signal\n fc : carrier wave frequency\n Ts : sampling duration of analog signal\n taps: number of taps for LPF\n '''\n # Downconvert\n duration = len(sig)*Ts\n _, carrier = cosine_wave(fc, duration, Ts)\n downconverted = sig * carrier\n # Low pass filter the downconverted signal\n Fs = 1/Ts\n band_edges = np.array([0, 0.1, 0.2, 1])*(Fs//2) # Cutoff at 0.2*Fs/2\n damps = [1, 0]\n b = signal.remez(taps, band_edges, damps, fs=Fs)\n # Scaling by 2 below to compensate for cos(x)**2 = (1/2)*[cos(2x) + 1]\n baseband = 2*signal.lfilter(b, 1, downconverted) # Baseband is still in 'analog' domain\n return baseband\n\n\ndef pulse_correlator(sig, M, shape=signal.hamming):\n '''\n inputs:\n sig: baseband signal\n M : oversampling factor\n shape: pulse shape function, should take a single parameter oversample_factor\n '''\n pulse = shape(M)\n # Normalize pulse so that correlation with another pulse gives coeff = 1\n pulse_normalized = pulse/(power(pulse)*len(pulse))\n # In 'full' mode, correlation of a pulse with itself gives an array of 2*M-1 elements\n # The peak is at index M - 1\n correlated = np.correlate(sig, pulse_normalized, 'full')\n return correlated\n\n\ndef quantalph(sig, alphabet):\n '''\n Quantize sig to nearest symbol in alphabet\n '''\n dist = (sig.reshape(-1,1) - alphabet.reshape(1, -1))**2\n idx = np.argmin(dist, axis=1)\n hard_decisions = alphabet[idx]\n return hard_decisions\n\n\ndef eye_diag(analog, n_eye=5, oversample_factor=10):\n '''\n inputs:\n analog: analog signal\n n_eye: number of symbols for eye\n oversample_factor: oversampling factor for analog signal\n output:\n 2D array, each row consists of n_eye symbols\n '''\n M = oversample_factor\n n_eye = 5 # Number of symbols in eye\n groups = len(analog)//(n_eye*M)\n eye_diag = analog[-n_eye*groups*M:].reshape(-1, n_eye*M)\n return eye_diag\n\n\n# Final receiver\ndef ideal_receiver(sig, fc=20, Ts=1/100):\n '''\n inputs:\n sig: received signal (numpy array)\n fc: carrier frequency\n Ts: sampling frequency (for analog signal)\n output:\n decoded msg (str)\n '''\n oversample_factor = int(1/Ts)\n # Demodulate the carrier wave\n taps = 50\n baseband = demodulate(sig, fc, Ts, taps)\n # Use correlation to extract pulse amplitudes\n correlated = pulse_correlator(baseband, oversample_factor)\n # Downsample to get soft decisions\n filter_delay = taps//2 # taps // 2\n correlator_delay = oversample_factor\n sampling_start_idx = filter_delay + correlator_delay - 1\n soft_decisions = correlated[sampling_start_idx::oversample_factor]\n # Quantize to get hard decisions\n alphabet = np.array([-3, -1, 1, 3])\n hard_decisions = quantalph(soft_decisions, alphabet)\n # Decode message\n decoded_msg = 
pam2letters(hard_decisions)\n return decoded_msg\n\n\n\n\n" ]
[ [ "scipy.signal.remez", "numpy.argmin", "numpy.correlate", "scipy.signal.lfilter", "numpy.array" ] ]
luke-l7/IML.HUJI
[ "dfa440c0c258c4049a4801c6b240f788b98d4639" ]
[ "IMLearn/learners/regressors/polynomial_fitting.py" ]
[ "from __future__ import annotations\nfrom typing import NoReturn\nfrom . import LinearRegression\nfrom ...base import BaseEstimator\nimport numpy as np\n\n\nclass PolynomialFitting(LinearRegression):\n \"\"\"\n Polynomial Fitting using Least Squares estimation\n \"\"\"\n def __init__(self, k: int) -> PolynomialFitting:\n \"\"\"\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n \"\"\"\n super().__init__()\n self._k = k\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n super(PolynomialFitting, self)._fit(self.__transform(X),y)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n return super(PolynomialFitting, self)._predict(self.__transform(X))\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n return super(PolynomialFitting, self)._loss(X,y)\n\n def __transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n \"\"\"\n vander = np.vander(X,self._k+1,increasing=True)\n return vander\n" ]
[ [ "numpy.vander" ] ]
markr-fu-berlin/ParlAI
[ "29743cc7b47c413c2181f68c0b7ef40a6f06a40f" ]
[ "parlai/agents/mlb_vqa/loadstates.py" ]
[ "#!/usr/bin/env python3\n\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport os\nimport numpy\n\nimport torch\n\nfrom collections import OrderedDict\n\nurls = {}\nurls['dictionary'] = 'http://www.cs.toronto.edu/~rkiros/models/dictionary.txt'\nurls['utable'] = 'http://www.cs.toronto.edu/~rkiros/models/utable.npy'\nurls['uni_skip'] = 'http://www.cs.toronto.edu/~rkiros/models/uni_skip.npz'\n\n\ndef load_dictionary(download_dir):\n path_dico = os.path.join(download_dir, 'dictionary.txt')\n if not os.path.exists(path_dico):\n os.system('mkdir -p ' + download_dir)\n os.system('wget {} -P {}'.format(urls['dictionary'], download_dir))\n with open(path_dico, 'r') as handle:\n dico_list = handle.readlines()\n dico = {word.strip(): idx for idx, word in enumerate(dico_list)}\n return dico\n\n\ndef load_emb_params(download_dir):\n table_name = 'utable'\n path_params = os.path.join(download_dir, table_name + '.npy')\n if not os.path.exists(path_params):\n os.system('mkdir -p ' + download_dir)\n os.system('wget {} -P {}'.format(urls[table_name], download_dir))\n params = numpy.load(path_params, encoding='latin1') # to load from python2\n return params\n\n\ndef load_rnn_params(download_dir):\n skip_name = 'uni_skip'\n path_params = os.path.join(download_dir, skip_name + '.npz')\n if not os.path.exists(path_params):\n os.system('mkdir -p ' + download_dir)\n os.system('wget {} -P {}'.format(urls[skip_name], download_dir))\n params = numpy.load(path_params, encoding='latin1') # to load from python2\n return params\n\n\ndef make_emb_state_dict(dictionary, parameters, vocab):\n weight = torch.zeros(len(vocab), 620)\n unknown_params = parameters[dictionary['UNK']]\n nb_unknown = 0\n for id_weight, word in enumerate(vocab):\n if word in dictionary:\n id_params = dictionary[word]\n params = parameters[id_params]\n else:\n # print('Warning: word `{}` not in dictionary'.format(word))\n params = unknown_params\n nb_unknown += 1\n weight[id_weight] = torch.from_numpy(params)\n state_dict = OrderedDict({'weight': weight})\n if nb_unknown > 0:\n print('Warning: {}/{} words are not in dictionary, thus set UNK'\n .format(nb_unknown, len(dictionary)))\n return state_dict\n\n\ndef make_gru_state_dict(p):\n s = OrderedDict()\n s['bias_ih_l0'] = torch.zeros(7200)\n s['bias_hh_l0'] = torch.zeros(7200) # must stay equal to 0\n s['weight_ih_l0'] = torch.zeros(7200, 620)\n s['weight_hh_l0'] = torch.zeros(7200, 2400)\n s['weight_ih_l0'][:4800] = torch.from_numpy(p['encoder_W']).t()\n s['weight_ih_l0'][4800:] = torch.from_numpy(p['encoder_Wx']).t()\n s['bias_ih_l0'][:4800] = torch.from_numpy(p['encoder_b'])\n s['bias_ih_l0'][4800:] = torch.from_numpy(p['encoder_bx'])\n s['weight_hh_l0'][:4800] = torch.from_numpy(p['encoder_U']).t()\n s['weight_hh_l0'][4800:] = torch.from_numpy(p['encoder_Ux']).t()\n return s\n\n\ndef make_bayesian_state_dict(p):\n s = OrderedDict()\n s['gru_cell.weight_ir.weight'] = torch.from_numpy(p['encoder_W']).t()[:2400]\n s['gru_cell.weight_ii.weight'] = torch.from_numpy(p['encoder_W']).t()[2400:]\n s['gru_cell.weight_in.weight'] = torch.from_numpy(p['encoder_Wx']).t()\n\n s['gru_cell.weight_ir.bias'] = torch.from_numpy(p['encoder_b'])[:2400]\n s['gru_cell.weight_ii.bias'] = torch.from_numpy(p['encoder_b'])[2400:]\n s['gru_cell.weight_in.bias'] = torch.from_numpy(p['encoder_bx'])\n\n s['gru_cell.weight_hr.weight'] = 
torch.from_numpy(p['encoder_U']).t()[:2400]\n s['gru_cell.weight_hi.weight'] = torch.from_numpy(p['encoder_U']).t()[2400:]\n s['gru_cell.weight_hn.weight'] = torch.from_numpy(p['encoder_Ux']).t()\n return s\n" ]
[ [ "torch.zeros", "numpy.load", "torch.from_numpy" ] ]
jsteggink/trankit
[ "61ef593999bfa29751990d0d4bcf259daed05db4" ]
[ "trankit/adapter_transformers/data/datasets/glue.py" ]
[ "import logging\r\nimport os\r\nimport time\r\nfrom dataclasses import dataclass, field\r\nfrom enum import Enum\r\nfrom typing import List, Optional, Union\r\n\r\nimport torch\r\nfrom filelock import FileLock\r\nfrom torch.utils.data.dataset import Dataset\r\n\r\nfrom ...tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...tokenization_xlm_roberta import XLMRobertaTokenizer\r\nfrom ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors\r\nfrom ..processors.utils import InputFeatures\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@dataclass\r\nclass GlueDataTrainingArguments:\r\n \"\"\"\r\n Arguments pertaining to what data we are going to input our model for training and eval.\r\n\r\n Using `HfArgumentParser` we can turn this class\r\n into argparse arguments to be able to specify them on\r\n the command line.\r\n \"\"\"\r\n\r\n task_name: str = field(metadata={\"help\": \"The name of the task to train on: \" + \", \".join(glue_processors.keys())})\r\n data_dir: str = field(\r\n metadata={\"help\": \"The input data dir. Should contain the .tsv files (or other data files) for the task.\"}\r\n )\r\n max_seq_length: int = field(\r\n default=128,\r\n metadata={\r\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\r\n \"than this will be truncated, sequences shorter will be padded.\"\r\n },\r\n )\r\n overwrite_cache: bool = field(\r\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\r\n )\r\n\r\n def __post_init__(self):\r\n self.task_name = self.task_name.lower()\r\n\r\n\r\nclass Split(Enum):\r\n train = \"train\"\r\n dev = \"dev\"\r\n test = \"test\"\r\n\r\n\r\nclass GlueDataset(Dataset):\r\n \"\"\"\r\n This will be superseded by a framework-agnostic approach\r\n soon.\r\n \"\"\"\r\n\r\n args: GlueDataTrainingArguments\r\n output_mode: str\r\n features: List[InputFeatures]\r\n\r\n def __init__(\r\n self,\r\n args: GlueDataTrainingArguments,\r\n tokenizer: PreTrainedTokenizer,\r\n limit_length: Optional[int] = None,\r\n mode: Union[str, Split] = Split.train,\r\n ):\r\n self.args = args\r\n self.processor = glue_processors[args.task_name]()\r\n self.output_mode = glue_output_modes[args.task_name]\r\n if isinstance(mode, str):\r\n try:\r\n mode = Split[mode]\r\n except KeyError:\r\n raise KeyError(\"mode is not a valid split name\")\r\n # Load data features from cache or dataset file\r\n cached_features_file = os.path.join(\r\n args.data_dir,\r\n \"cached_{}_{}_{}_{}\".format(\r\n mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name,\r\n ),\r\n )\r\n label_list = self.processor.get_labels()\r\n if args.task_name in [\"mnli\", \"mnli-mm\"] and tokenizer.__class__ in (\r\n RobertaTokenizer,\r\n RobertaTokenizerFast,\r\n XLMRobertaTokenizer,\r\n ):\r\n # HACK(label indices are swapped in RoBERTa pretrained model)\r\n label_list[1], label_list[2] = label_list[2], label_list[1]\r\n self.label_list = label_list\r\n\r\n # Make sure only the first process in distributed training processes the dataset,\r\n # and the others will use the cache.\r\n lock_path = cached_features_file + \".lock\"\r\n with FileLock(lock_path):\r\n\r\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\r\n start = time.time()\r\n self.features = torch.load(cached_features_file)\r\n logger.info(\r\n f\"Loading features from cached file {cached_features_file} [took 
%.3f s]\", time.time() - start\r\n )\r\n else:\r\n logger.info(f\"Creating features from dataset file at {args.data_dir}\")\r\n\r\n if mode == Split.dev:\r\n examples = self.processor.get_dev_examples(args.data_dir)\r\n elif mode == Split.test:\r\n examples = self.processor.get_test_examples(args.data_dir)\r\n else:\r\n examples = self.processor.get_train_examples(args.data_dir)\r\n if limit_length is not None:\r\n examples = examples[:limit_length]\r\n self.features = glue_convert_examples_to_features(\r\n examples,\r\n tokenizer,\r\n max_length=args.max_seq_length,\r\n label_list=label_list,\r\n output_mode=self.output_mode,\r\n )\r\n start = time.time()\r\n torch.save(self.features, cached_features_file)\r\n # ^ This seems to take a lot of time so I want to investigate why and how we can improve.\r\n logger.info(\r\n \"Saving features into cached file %s [took %.3f s]\", cached_features_file, time.time() - start\r\n )\r\n\r\n def __len__(self):\r\n return len(self.features)\r\n\r\n def __getitem__(self, i) -> InputFeatures:\r\n return self.features[i]\r\n\r\n def get_labels(self):\r\n return self.label_list\r\n" ]
[ [ "torch.save", "torch.load" ] ]
YeongHyeon/Context-Encoder
[ "30efe975a8970dca0dfa70c09c889efadf7c9c09" ]
[ "source/tf_process.py" ]
[ "import os, inspect, time, math\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom random import *\nfrom PIL import Image\nfrom sklearn.decomposition import PCA\n\nPACK_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+\"/..\"\n\ndef make_dir(path):\n\n try: os.mkdir(path)\n except: pass\n\ndef gray2rgb(gray):\n\n rgb = np.ones((gray.shape[0], gray.shape[1], 3)).astype(np.float32)\n rgb[:, :, 0] = gray[:, :, 0]\n rgb[:, :, 1] = gray[:, :, 0]\n rgb[:, :, 2] = gray[:, :, 0]\n\n return rgb\n\ndef dat2canvas(data, height, width):\n\n numd = math.ceil(np.sqrt(data.shape[0]))\n [dn, dh, dw, dc] = data.shape\n canvas = np.ones((dh*numd, dw*numd, dc)).astype(np.float32)\n\n for y in range(numd):\n for x in range(numd):\n try: tmp = data[x+(y*numd)]\n except: pass\n else: canvas[(y*dh):(y*dh)+height, (x*dw):(x*dw)+width, :] = tmp\n if(dc == 1):\n canvas = gray2rgb(gray=canvas)\n\n return canvas\n\ndef save_img(contents, height, width, names=[\"\", \"\", \"\"], savename=\"\"):\n\n num_cont = len(contents)\n plt.figure(figsize=(5*num_cont+2, 5))\n\n for i in range(num_cont):\n plt.subplot(1,num_cont,i+1)\n plt.title(names[i])\n plt.imshow(dat2canvas(data=contents[i], height=height, width=width))\n\n plt.tight_layout()\n plt.savefig(savename)\n plt.close()\n\ndef discrete_cmap(N, base_cmap=None):\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n\n return base.from_list(cmap_name, color_list, N)\n\ndef latent_plot(latent, y, n, savename=\"\"):\n\n plt.figure(figsize=(6, 5))\n plt.scatter(latent[:, 0], latent[:, 1], c=y, \\\n marker='o', edgecolor='none', cmap=discrete_cmap(n, 'jet'))\n plt.colorbar(ticks=range(n))\n plt.grid()\n plt.tight_layout()\n plt.savefig(savename)\n plt.close()\n\ndef boxplot(contents, savename=\"\"):\n\n data, label = [], []\n for cidx, content in enumerate(contents):\n data.append(content)\n label.append(\"class-%d\" %(cidx))\n\n plt.clf()\n fig, ax1 = plt.subplots()\n bp = ax1.boxplot(data, showfliers=True, whis=3)\n ax1.set_xticklabels(label, rotation=45)\n\n plt.tight_layout()\n plt.savefig(savename)\n plt.close()\n\ndef histogram(contents, savename=\"\"):\n\n n1, _, _ = plt.hist(contents[0], bins=100, alpha=0.5, label='Normal')\n n2, _, _ = plt.hist(contents[1], bins=100, alpha=0.5, label='Abnormal')\n h_inter = np.sum(np.minimum(n1, n2)) / np.sum(n1)\n plt.xlabel(\"MSE\")\n plt.ylabel(\"Number of Data\")\n xmax = max(contents[0].max(), contents[1].max())\n plt.xlim(0, xmax)\n plt.text(x=xmax*0.01, y=max(n1.max(), n2.max()), s=\"Histogram Intersection: %.3f\" %(h_inter))\n plt.legend(loc='upper right')\n plt.savefig(savename)\n plt.close()\n\ndef generate_random_mask(x, height, width):\n\n h_s, w_s = randrange(height//2), randrange(width//2)\n h_d, w_d = randint(height//4, height//2), randint(width//4, width//2)\n h_e, w_e = h_s + h_d, w_s + w_d\n m = np.zeros_like(x)\n m[:, h_s:h_e, w_s:w_e, :] = 1\n\n return m\n\ndef generate_static_mask(x, height, width):\n\n h_d, w_d = height//3, width//3\n h_s, w_s = height//2 - h_d//2, width//2 - w_d//2\n h_e, w_e = h_s + h_d, w_s + w_d\n m = np.zeros_like(x)\n m[:, h_s:h_e, w_s:w_e, :] = 1\n\n return m\n\ndef training(sess, saver, neuralnet, dataset, epochs, batch_size, normalize=True):\n\n print(\"\\nTraining to %d epochs (%d of minibatch size)\" %(epochs, batch_size))\n\n summary_writer = tf.compat.v1.summary.FileWriter(PACK_PATH+'/Checkpoint', sess.graph)\n\n make_dir(path=\"training\")\n\n 
start_time = time.time()\n iteration = 0\n\n run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)\n run_metadata = tf.compat.v1.RunMetadata()\n\n test_sq = 10\n test_size = test_sq**2\n for epoch in range(epochs):\n\n x_tr, _ = dataset.next_train(batch_size=test_size, fix=True)\n m_tr = generate_static_mask(x=x_tr, height=dataset.height, width=dataset.width)\n\n x_masked, x_restore = sess.run([neuralnet.drop, neuralnet.x_hat], \\\n feed_dict={neuralnet.x:x_tr, neuralnet.m:m_tr, neuralnet.batch_size:x_tr.shape[0]})\n\n save_img(contents=[x_tr, x_masked, x_masked + (x_restore * m_tr), (x_tr-x_restore)**2], \\\n height=dataset.height, width=dataset.width, \\\n names=[\"Input\\n(x)\", \"Masked\\n(from x)\", \"Restoration\\n(x to x-hat)\", \"Difference\"], \\\n savename=os.path.join(\"training\", \"%08d.png\" %(epoch)))\n\n while(True):\n x_tr, terminator = dataset.next_train(batch_size)\n m_tr = generate_random_mask(x=x_tr, height=dataset.height, width=dataset.width)\n\n _, summaries = sess.run([neuralnet.optimizer, neuralnet.summaries], \\\n feed_dict={neuralnet.x:x_tr, neuralnet.m:m_tr, neuralnet.batch_size:x_tr.shape[0]}, \\\n options=run_options, run_metadata=run_metadata)\n loss_rec, loss_adv, loss_tot = sess.run([neuralnet.loss_rec, neuralnet.loss_adv, neuralnet.loss_tot], \\\n feed_dict={neuralnet.x:x_tr, neuralnet.m:m_tr, neuralnet.batch_size:x_tr.shape[0]})\n summary_writer.add_summary(summaries, iteration)\n\n iteration += 1\n if(terminator): break\n\n print(\"Epoch [%d / %d] (%d iteration) Rec:%.3f, Adv:%.3f, Tot:%.3f\" \\\n %(epoch, epochs, iteration, loss_rec, loss_adv, loss_tot))\n saver.save(sess, PACK_PATH+\"/Checkpoint/model_checker\")\n summary_writer.add_run_metadata(run_metadata, 'epoch-%d' % epoch)\n\ndef test(sess, saver, neuralnet, dataset, batch_size):\n\n if(os.path.exists(PACK_PATH+\"/Checkpoint/model_checker.index\")):\n print(\"\\nRestoring parameters\")\n saver.restore(sess, PACK_PATH+\"/Checkpoint/model_checker\")\n\n print(\"\\nTest...\")\n\n make_dir(path=\"test\")\n\n while(True):\n x_te, terminator = dataset.next_test(1)\n m_te = generate_static_mask(x=x_te, height=dataset.height, width=dataset.width)\n\n x_masked, x_restore, restore_loss = sess.run([neuralnet.drop, neuralnet.x_hat, neuralnet.mse_r], \\\n feed_dict={neuralnet.x:x_te, neuralnet.m:m_te, neuralnet.batch_size:x_te.shape[0]})\n\n [h, w, c] = x_te[0].shape\n canvas = np.ones((h, w*3, c), np.float32)\n canvas[:, :w, :] = x_te[0]\n canvas[:, w:w*2, :] = x_masked[0]\n canvas[:, w*2:, :] = x_masked[0] + (x_restore[0] * m_te[0])\n\n result = Image.fromarray((canvas * 255).astype(np.uint8))\n result.save(os.path.join(\"test\", \"%08d.png\" %(dataset.idx_te)))\n\n if(terminator): break\n" ]
[ [ "numpy.sum", "numpy.ones", "tensorflow.compat.v1.RunOptions", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "tensorflow.compat.v1.RunMetadata", "tensorflow.compat.v1.summary.FileWriter", "numpy.linspace", "numpy.minimum", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplots", "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.close", "numpy.zeros_like", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplot", "numpy.sqrt", "matplotlib.pyplot.xlabel" ] ]
sicara/mentat
[ "fbcfbbe71042740f05e0b1e368bb747c8e0d10ee" ]
[ "tf_explain/utils/image.py" ]
[ "\"\"\" Module for image operations \"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n\ndef apply_grey_patch(image, top_left_x, top_left_y, patch_size):\n \"\"\"\n Replace a part of the image with a grey patch.\n\n Args:\n image (numpy.ndarray): Input image\n top_left_x (int): Top Left X position of the applied box\n top_left_y (int): Top Left Y position of the applied box\n patch_size (int): Size of patch to apply\n\n Returns:\n numpy.ndarray: Patched image\n \"\"\"\n patched_image = np.array(image, copy=True)\n patched_image[\n top_left_y : top_left_y + patch_size, top_left_x : top_left_x + patch_size, :\n ] = 127.5\n\n return patched_image\n\n\[email protected]\ndef transform_to_normalized_grayscale(tensor):\n \"\"\"\n Transform tensor over RGB axis to grayscale.\n\n Args:\n tensor (tf.Tensor): 4D-Tensor with shape (batch_size, H, W, 3)\n\n Returns:\n tf.Tensor: 4D-Tensor of grayscale tensor, with shape (batch_size, H, W, 1)\n \"\"\"\n grayscale_tensor = tf.reduce_sum(tensor, axis=-1)\n\n normalized_tensor = tf.cast(\n 255 * tf.image.per_image_standardization(grayscale_tensor), tf.uint8\n )\n\n return normalized_tensor\n" ]
[ [ "numpy.array", "tensorflow.image.per_image_standardization", "tensorflow.reduce_sum" ] ]
jiaw-z/FCStereo
[ "f76c3317e0951986b49a3bb794028a8ae067d410" ]
[ "dmb/modeling/stereo/backbones/FC_PSMNet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom dmb.modeling.stereo.layers.basic_layers import conv_bn, conv_bn_relu, BasicBlock\nfrom dmb.modeling.stereo.layers.basic_layers import conv_in_relu, BasicBlock_IN\n\nclass PSM_Encoder_Instance(nn.Module):\n \"\"\"\n Backbone proposed in PSMNet.\n Args:\n in_planes (int): the channels of input\n batch_norm (bool): whether use batch normalization layer, default True\n Inputs:\n l_img (Tensor): left image, in [BatchSize, 3, Height, Width] layout\n r_img (Tensor): right image, in [BatchSize, 3, Height, Width] layout\n Outputs:\n l_fms (Tensor): left image feature maps, in [BatchSize, 32, Height//4, Width//4] layout\n\n r_fms (Tensor): right image feature maps, in [BatchSize, 32, Height//4, Width//4] layout\n \"\"\"\n\n def __init__(self, in_planes=3, batch_norm=True):\n super(PSM_Encoder_Instance, self).__init__()\n self.in_planes = in_planes\n self.batch_norm = batch_norm\n\n self.firstconv = nn.Sequential(\n conv_in_relu(batch_norm, self.in_planes, 32, 3, 2, 1, 1, bias=False),\n conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False),\n conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False),\n )\n\n\n # For building Basic Block\n self.in_planes = 32\n\n # BasicBlock_IN\n self.layer1 = self._make_layer(batch_norm, BasicBlock_IN, 32, 3, 1, 1, 1)\n self.layer2 = self._make_layer(batch_norm, BasicBlock, 64, 16, 2, 1, 1)\n self.layer3 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 1, 1)\n self.layer4 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 2, 2)\n\n self.branch1 = nn.Sequential(\n nn.AvgPool2d((64, 64), stride=(64, 64)),\n conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),\n )\n self.branch2 = nn.Sequential(\n nn.AvgPool2d((32, 32), stride=(32, 32)),\n conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),\n )\n self.branch3 = nn.Sequential(\n nn.AvgPool2d((16, 16), stride=(16, 16)),\n conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),\n )\n self.branch4 = nn.Sequential(\n nn.AvgPool2d((8, 8), stride=(8, 8)),\n conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False),\n )\n self.lastconv = nn.Sequential(\n conv_bn_relu(batch_norm, 320, 128, 3, 1, 1, 1, bias=False),\n nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, dilation=1, bias=False)\n )\n\n def _make_layer(self, batch_norm, block, out_planes, blocks, stride, padding, dilation):\n downsample = None\n if stride != 1 or self.in_planes != out_planes * block.expansion:\n downsample = conv_bn(\n batch_norm, self.in_planes, out_planes * block.expansion,\n kernel_size=1, stride=stride, padding=0, dilation=1\n )\n\n layers = []\n layers.append(\n block(batch_norm, self.in_planes, out_planes, stride, downsample, padding, dilation)\n )\n self.in_planes = out_planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(batch_norm, self.in_planes, out_planes, 1, None, padding, dilation)\n )\n\n return nn.Sequential(*layers)\n\n def _forward(self, x):\n w_arr = []\n for i in range(len(self.firstconv)):\n x = self.firstconv[i](x)\n w_arr.append(x)\n\n for i in range(len(self.layer1)):\n x = self.layer1[i](x)\n w_arr.append(x)\n \n output_2_1 = x\n output_4_0 = self.layer2(output_2_1)\n output_4_1 = self.layer3(output_4_0)\n output_8 = self.layer4(output_4_1)\n\n output_branch1 = self.branch1(output_8)\n output_branch1 = F.interpolate(\n output_branch1, (output_8.size()[2], output_8.size()[3]),\n mode='bilinear', align_corners=True\n )\n\n output_branch2 = self.branch2(output_8)\n output_branch2 = 
F.interpolate(\n output_branch2, (output_8.size()[2], output_8.size()[3]),\n mode='bilinear', align_corners=True\n )\n\n output_branch3 = self.branch3(output_8)\n output_branch3 = F.interpolate(\n output_branch3, (output_8.size()[2], output_8.size()[3]),\n mode='bilinear', align_corners=True\n )\n\n output_branch4 = self.branch4(output_8)\n output_branch4 = F.interpolate(\n output_branch4, (output_8.size()[2], output_8.size()[3]),\n mode='bilinear', align_corners=True\n )\n\n output_feature = torch.cat(\n (output_4_0, output_8, output_branch4, output_branch3, output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n\n return output_feature, w_arr\n\n def forward(self, input):\n fms, w_arr = self._forward(input)\n\n return [fms, w_arr]\n\n\n\n\nclass FCPSMNetBackbone(nn.Module):\n \"\"\"\n Backbone proposed in PSMNet.\n Args:\n in_planes (int): the channels of input\n batch_norm (bool): whether use batch normalization layer, default True\n Inputs:\n l_img (Tensor): left image, in [BatchSize, 3, Height, Width] layout\n r_img (Tensor): right image, in [BatchSize, 3, Height, Width] layout\n Outputs:\n l_fms (Tensor): left image feature maps, in [BatchSize, 32, Height//4, Width//4] layout\n\n r_fms (Tensor): right image feature maps, in [BatchSize, 32, Height//4, Width//4] layout\n \"\"\"\n\n def __init__(self, in_planes=3, batch_norm=True, m=0.999):\n super(FCPSMNetBackbone, self).__init__() \n self.in_planes = in_planes\n self.m = m\n print('m:{}'.format(m))\n self.encoder_q = PSM_Encoder_Instance(in_planes, batch_norm)\n self.encoder_k = PSM_Encoder_Instance(in_planes, batch_norm)\n\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data.copy_(param_q.data) # initialize\n param_k.requires_grad = False # not update by gradient\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)\n\n def forward(self, *input):\n if len(input) != 2:\n raise ValueError('expected input length 2 (got {} length input)'.format(len(input)))\n\n l_img, r_img = input\n\n l_fms, l_w_arr = self.encoder_q(l_img)\n\n if self.training:\n with torch.no_grad(): # no gradient to keys\n self._momentum_update_key_encoder() # update the key encoder\n r_fms, r_w_arr = self.encoder_k(r_img)\n if isinstance(r_fms, list):\n r_fms[0] = r_fms[0].detach()\n else:\n r_fms = r_fms.detach()\n else:\n r_fms, r_w_arr = self.encoder_q(r_img)\n\n return [l_fms, l_w_arr], [r_fms, r_w_arr]" ]
[ [ "torch.no_grad", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.cat" ] ]
Talendar/deep_learning_exercises
[ "4c375ac478434d085b4b67b7f631d43da9f8a4a1" ]
[ "3_comparison_mlp_and_rbf/rbf_net/RBFNetwork.py" ]
[ "\"\"\" Implementation of a Radial Basis Function (RBF) Network.\n\n@author Gabriel Nogueira (Talendar)\n@author Marcel Otoboni\n\"\"\"\n\nfrom mlp.multilayer_perceptron import MultilayerPerceptron\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\nclass RBFNetwork:\n \"\"\" Implementation of a Radial Basis Function (RBF) Network. \"\"\"\n\n def __init__(self, num_output_neurons, num_clusters=8):\n \"\"\" Instantiates a new RBF network.\n\n :param num_output_neurons: number of neurons in the output layer.\n :param num_clusters: number of clusters to be considered by the k-means algorithm.\n \"\"\"\n self._num_clusters = num_clusters\n self._kmeans = None\n self._mlp = MultilayerPerceptron(num_clusters, layers_size=[num_output_neurons], layers_activation=\"linear\")\n\n def _gauss_rbf(self, data, breadth_param=1):\n \"\"\" Transforms the data using the Gaussian radial basis function. \"\"\"\n transformed = []\n for x in data:\n trans_x = np.zeros(self._num_clusters)\n for i, u in enumerate(self._kmeans.cluster_centers_): # iterate through centroids of the clusters\n v = np.linalg.norm(x - u) # distance between x and the centroid\n trans_x[i] = np.exp(-(v**2) / (2 * breadth_param**2)) # gaussian function\n transformed.append(trans_x)\n return np.array(transformed)\n\n def predict(self, x, just_one=False):\n \"\"\" Predicts the class of the given example. \"\"\"\n if just_one:\n x = np.array([x])\n return self._mlp.predict(self._gauss_rbf(x))\n\n def fit(self, data, labels, cost_function, epochs, learning_rate):\n \"\"\" Fits the model to the given data.\n\n :param data: numpy 2D-array in which each ROW represents an input sample vector.\n :param labels: numpy 2D-array in which each ROW represents a vector with the samples' labels.\n :param cost_function: cost function to be minimized.\n :param epochs: number of training epochs (iterations).\n :param learning_rate: learning rate of the model.\n \"\"\"\n self._kmeans = KMeans(n_clusters=self._num_clusters).fit(data)\n data = self._gauss_rbf(data)\n self._mlp.fit(data, labels, cost_function, epochs, learning_rate)\n" ]
[ [ "numpy.zeros", "numpy.exp", "sklearn.cluster.KMeans", "numpy.array", "numpy.linalg.norm" ] ]
ls-2018/tips
[ "1f5f5195d7181b5dd4616db02166f7f92c97f1cd" ]
[ "AWS/get_data.py" ]
[ "# encoding=utf-8\n\"\"\"\n@Time: 2019/7/8 10:09 \n@Author: liushuo\n@File: get_data.py \n@Desc: \n@Software: PyCharm\n\"\"\"\nimport boto3\nimport pandas as pd\n\n# s3 = boto3.resource('s3')\n# bucket = s3.Bucket('s3-billing')\n# data1 = data2 = None\n# for obj in bucket.objects.all():\n# if '936669166135-aws-billing-csv-2017-10.csv' == obj.key:\n# data1 = pd.read_csv(obj.get()['Body'])\n# if '936669166135-aws-billing-csv-2017-9.csv' == obj.key:\n# data2 = pd.read_csv(obj.get()['Body'])\n\n# if data1 and data2:\n# data1.append()\n\n\ndf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [1, 2, 3, 5, 4], 'c': [1, 2, 3, 5, 4], 'd': [1, 2, 3, 5, 4]})\n\n# s = pd.Series([16, 17, 1, 2], index=df1.columns)\n# df = df1.append(s, )\n\n# s = pd.Series([16, 17, 18, 19], name='E', index=df.index)\n# df = pd.concat([df, s], axis=1)\ndf = pd.DataFrame({}).append([{'A': 16, 'B': 17, 'C': 18, 'D': 19}, {'A': 20, 'B': 21, 'C': 22, 'D': 23}],\n ignore_index=True)\n# df.loc[3] = [16, 17, 18, 19]\n\ndf2 = pd.DataFrame({}).append([{'A': 116, 'B': 117, 'C': 118, 'D': 191}, {'A': 210, 'B': 211, 'C': 122, 'D': 213}],\n ignore_index=True)\n\nfor k, v in enumerate(list(df2.index)):\n df1.loc[list(df1.index)[-1] + k] = list(df2.loc[k]) # df2.iloc[k]\n # print(v)\nprint(df1)\n\"\"\"\n A B C D\n0 16 17 18 19\n1 20 21 22 23\n3 16 17 18 19\n\"\"\"\n# df = df.append([1, 2, 3, 4], ignore_index=True)\n\"\"\"\n A B C D 0\n0 16.0 17.0 18.0 19.0 NaN\n1 20.0 21.0 22.0 23.0 NaN\n2 NaN NaN NaN NaN 1.0\n3 NaN NaN NaN NaN 2.0\n4 NaN NaN NaN NaN 3.0\n5 NaN NaN NaN NaN 4.0\n\"\"\"\n# s = pd.Series([16, 17, 18, 19])\n# df = pd.concat([df, s])\n" ]
[ [ "pandas.DataFrame" ] ]
relacs/rlx2nix
[ "be66f02a78a225b01ee391dd706969c789ce28e8" ]
[ "rlx2nix/converter.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright © 2022, Neuroethology Lab Uni Tuebingen\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\nimport re\nimport os\nimport glob\nimport odml\nimport logging\nimport subprocess\nimport numpy as np\nimport nixio as nix\n\nfrom .config import ConfigFile\nfrom .traces import EventTrace, RawTrace\nfrom .stimuli import StimuliDat\nfrom .util import parse_value, odml2nix, only_number\nfrom .stimdescription import parse_stimulus_description\n\nfrom IPython import embed\n\n\nclass Converter(object):\n\n def __init__(self, folder_name, output_name, force=False) -> None:\n if not os.path.exists(folder_name):\n logging.error(f\"{folder_name} does not exist!\")\n raise ValueError(\"File not found error!\")\n self._folder = folder_name\n self._output = output_name\n self._event_traces = None\n self._raw_traces = None\n self._raw_data_arrays = {}\n self._event_data_arrays = {}\n self._stimuli_dat = None\n self._force = force\n self._nixfile = None\n self._block = None\n self._repro_tags = {}\n self._stimulus_mtags = {}\n self.preflight()\n\n def preflight(self):\n logging.debug(f\"Pre-checking folder {self._folder}!\")\n self.check_output()\n self.check_folder()\n logging.debug(\"Pre-checking done.\")\n\n def check_output(self):\n logging.debug(f\"Checking output name: {self._output}!\")\n if os.path.exists(self._output):\n logging.warn(f\"Output file name {self._output} already exists!\")\n if self._force:\n logging.warn(f\"... force flag is set {self._force}, going to overwrite!\")\n else:\n logging.error(f\"Force flag is not set ({self._force}), abort!\")\n raise ValueError(\"Output file {self._output} already exists! If you want to overwrite it use the --force flag.\")\n logging.debug(f\"... ok!\")\n\n return True\n\n def unzip(self, tracename):\n if os.path.exists(tracename):\n logging.debug(f\"\\tunzip: {tracename}\")\n subprocess.check_call([\"gunzip\", tracename])\n\n def find_traces(self):\n event_traces = []\n raw_traces = []\n\n configuration = self.find_config_file()\n for et in self.find_event_traces():\n event_traces.append(EventTrace(et, configuration))\n\n for rt in self.find_raw_traces():\n raw_traces.append(RawTrace(rt, configuration))\n\n return raw_traces, event_traces\n\n def find_raw_traces(self):\n logging.debug(f\"Checking for raw traces!\")\n traces = sorted(glob.glob(os.path.join(self._folder, \"trace-*.raw*\")))\n for rt in traces:\n if rt.endswith(\".gz\") and rt.split(\".gz\")[0] not in traces:\n self.unzip(os.path.split(rt)[-1])\n\n traces = sorted(glob.glob(os.path.join(self._folder, \"trace-*.raw\")))\n logging.debug(f\"Found {len(traces)} raw traces. {[os.path.split(t)[-1] for t in traces]}\")\n\n return traces\n\n def find_event_traces(self):\n logging.debug(\"Discovering event traces!\")\n traces = sorted(glob.glob(os.path.join(self._folder, \"*-events.dat\")))\n logging.debug(f\"Found {len(traces)} event traces. 
{[os.path.split(t)[-1] for t in traces]}\")\n return traces\n\n def find_config_file(self):\n if not os.path.exists(os.path.join(self._folder, \"relacs.cfg\")):\n logging.error(\"Found no info file!\")\n raise ValueError(f\"No relacs.cfg file found in {self._folder}!\")\n configuration = ConfigFile(os.path.join(self._folder, \"relacs.cfg\"))\n return configuration\n\n def find_info(self):\n filename = os.path.join(self._folder, \"info.dat\")\n if not os.path.exists(filename):\n logging.error(\"Found no info file!\")\n raise ValueError(f\"No info file found in {self._folder}!\")\n return True\n\n def read_info_file(self):\n def looks_like_oldstyle(filename):\n with open(filename, 'r') as f:\n for l in f:\n if \"# Recording\" in l:\n oldtyle = not l.strip().endswith(\":\")\n break\n return oldtyle\n\n filename = os.path.join(self._folder, \"info.dat\")\n oldstyle = looks_like_oldstyle(filename)\n info = {}\n logging.info(\"Reading info file....\")\n try:\n with open(filename, 'r') as f:\n lines = f.readlines()\n except UnicodeDecodeError:\n logging.debug(\"Replacing experimenter...\")\n command = r\"sudo sed -i '/Experimenter/c\\# Experimenter: Anna Stoeckl' %s\" % filename\n subprocess.check_call(command, shell=True)\n with open(filename, 'r') as f:\n lines = f.readlines()\n for l in lines:\n if not l.startswith(\"#\"):\n continue\n l = l.strip(\"#\").strip()\n if len(l) == 0:\n continue\n if oldstyle:\n if not \":\" in l: # subsection\n sec = {}\n info[l[:-1] if l.endswith(\":\") else l] = sec\n else:\n parts = l.split(':')\n sec[parts[0].strip()] = parts[1].strip('\"').strip() if len(parts) > 1 else \"\"\n else:\n if l.endswith(\":\"): # subsection\n sec = {}\n info[l[:-1] if l.endswith(\":\") else l] = sec\n else:\n parts = l.split(': ')\n sec[parts[0].strip()] = parts[1].strip('\"').strip() if len(parts) > 1 else \"\"\n return info\n\n def read_channel_config(self):\n logging.info(\"Reading channel configuration ...\")\n ids = [f\"identifier{i}\" for i in range(1, len(self._raw_traces)+1)]\n units = [f\"unit{i}\" for i in range(1, len(self._raw_traces)+1)]\n sampling_intervals = [f\"sample interval{i}\" for i in range(1, len(self._raw_traces)+1)]\n sampling_rates = [f\"sampling rate{i}\" for i in range(1, len(self._raw_traces)+1)]\n\n channel_config = {}\n for i in range(1, len(self._raw_traces)+1):\n channel_config[i] = {}\n with open(os.path.join(self._folder, \"stimuli.dat\")) as f:\n for line in f:\n if \"#\" in line:\n line = line[1:]\n prop = line.strip().split(\":\")[0].strip()\n value = line.strip().split(\":\")[-1].strip()\n if prop in ids:\n index = int(prop[-1])\n channel_config[index][\"identifier\"] = value\n if prop in units:\n index = int(prop[-1])\n channel_config[index][\"unit\"] = value\n if prop in sampling_intervals:\n index = int(prop[-1])\n channel_config[index][\"sampling interval\"] = value\n if prop in sampling_rates:\n index = int(prop[-1])\n channel_config[index][\"sampling rates\"] = value\n\n if \"analog output traces\" in line: # end of channel configuration, we are done here\n break\n return channel_config\n\n def find_stimulus_info(self):\n logging.debug(\"Scanning stimuli.dat file!\")\n if not os.path.exists(os.path.join(self._folder, \"stimuli.dat\")):\n logging.error(\"Found no stimuli.dat file! 
Abort!\")\n raise ValueError(\"No stimuli.dat file found!\")\n\n def find_stimulus_descriptions(self):\n logging.debug(\"Scanning stimulus-descriptions.dat!\")\n filename = os.path.join(self._folder, \"stimulus-descriptions.dat\")\n if not os.path.exists(filename):\n logging.warning(\"Stimulus descriptions file {filename} does not exist!\")\n return False\n return True\n\n def check_folder(self):\n logging.debug(\"Checking folder structure: ...\")\n self._raw_traces, self._event_traces = self.find_traces()\n self.find_info()\n logging.debug(\"Found info file!\")\n self.find_stimulus_info()\n logging.debug(\"Found stimulus information!\")\n stim_descriptions_found = self.find_stimulus_descriptions()\n if stim_descriptions_found:\n logging.debug(\"Found stimulus descriptions!\")\n else:\n logging.debug(\"Did not find stimulus descriptions!\")\n return True\n\n def convert_dataset_info(self, metadata, parent_section=None):\n def split_list(value_str):\n results = None\n if len(value_str) == 0:\n return \" \"\n if \"|\" in value_str:\n results = list(map(str.strip, value_str.split(\"|\")))\n elif value_str[0] == \"[\" and \"]\" in value_str:\n results = list(map(str.strip, value_str[1:value_str.index(\"]\")].split(', ')))\n else: \n results = value_str\n return results\n\n if parent_section is not None:\n for k in metadata.keys():\n if isinstance(metadata[k], dict):\n sec = parent_section.create_section(k, k.lower())\n self.convert_dataset_info(metadata[k], sec)\n else: # is property\n value, unit = parse_value(metadata[k])\n if value is None:\n continue\n if isinstance(value, str):\n value = split_list(value)\n p = parent_section.create_property(k, value)\n if unit is not None:\n p.unit = unit\n\n def open_nix_file(self):\n info = self.read_info_file()\n logging.info(f\"Creating output file {self._output} ...\")\n self._nixfile = nix.File.open(self._output, nix.FileMode.Overwrite)\n dataset_name = os.path.split(self._output)[-1].strip(\".nix\")\n\n self._block = self._nixfile.create_block(dataset_name, \"relacs.recording\")\n sec = self._nixfile.create_section(dataset_name, \"relacs.recording\")\n self._block.metadata = sec\n sec.create_property(\"relacs-nix version\", 1.1)\n self.convert_dataset_info(info, sec)\n\n def convert_raw_traces(self, channel_config):\n logging.info(\"Converting raw traces, this may take a little while...\")\n\n for rt in self._raw_traces:\n logging.info(f\"... trace {rt._trace_no}: {rt.name}\")\n data = np.fromfile(os.path.join(self._folder, rt.filename), dtype=np.float32)\n da = self._block.create_data_array(rt.name, f\"relacs.data.sampled.{rt.name}\", dtype=nix.DataType.Float, data=data)\n da.unit = channel_config[rt._trace_no][\"unit\"]\n si = float(channel_config[rt._trace_no][\"sampling interval\"][:-2]) / 1000.\n da.append_sampled_dimension(si, unit=\"s\")\n self._raw_data_arrays[rt] = da\n\n def convert_event_traces(self):\n\n def read_event_data(filename):\n logging.info(f\"... reading event times from file {filename}...\")\n times = []\n with open(filename, 'r') as f:\n for l in f:\n if len(l.strip()) == 0 or \"#\" in l:\n continue\n times.append(float(l.strip().split()[0].strip()))\n\n return np.array(times)\n\n logging.info(\"Converting event traces...\")\n for et in self._event_traces:\n logging.info(f\"... 
trace {et.name}\")\n event_times = read_event_data(et._filename)\n da = self._block.create_data_array(et.name, f\"relacs.data.events.{et.name}\", data=event_times)\n da.unit = \"s\"\n da.append_range_dimension_using_self()\n da.definition = f\"Events detected in {et.inputtrace}\"\n self._event_data_arrays[et] = da\n\n def convert_stimuli(self):\n def stimulus_descriptions(repro_name, reprorun, sampleinterval):\n \n def skip_first_index(signals):\n skip = True\n for s in signals:\n skip = skip and s.data[0].strip() == \"-\"\n return skip\n\n def find_active_signal(signals, stimulus_no):\n for i, s in enumerate(signals):\n if s.data[stimulus_no].strip() != \"-\":\n return i\n\n def parse_parameter(parameter_str):\n props = []\n if parameter_str.strip().startswith(\"\\\"\"):\n parameter_str = parameter_str[1:-1]\n parts = parameter_str.split(\",\")\n for p in parts:\n name = p.split(\":\")[0].strip()\n value_str = p.split(\":\")[-1].strip()\n value, unit = parse_value(value_str)\n props.append(odml.Property(name=name, value=value, unit=unit))\n return props\n\n stimuli = []\n stimulus_columns = reprorun.table[\"stimulus\"]\n signals = stimulus_columns.columns_by_name(\"signal\")\n skip_first = skip_first_index(signals)\n index_col = reprorun.table.find_column(1)\n abstimes = stimulus_columns.columns_by_name(\"time\")[0]\n delays = stimulus_columns.columns_by_name(\"delay\")[0]\n durations = stimulus_columns.columns_by_name(\"duration\")\n amplitudes = stimulus_columns.columns_by_name(\"amplitude\")\n if len(amplitudes) == 0: # this is an attempt for very old pre 2011 files.\n amplitudes = stimulus_columns.columns_by_name(\"%6.3f\")\n \n parameters = stimulus_columns.columns_by_name(\"parameter\")\n for i in range(0 if not skip_first else 1, len(index_col)):\n start_time = index_col[i] * sampleinterval\n active = find_active_signal(signals, i)\n characteristics = odml.Section(f\"{repro_name}_{i}\")\n characteristics.create_property(\"signal\", signals[active].data[i])\n p = characteristics.create_property(\"start_time\", start_time)\n p.unit = \"s\"\n dur = float(durations[active].data[i]) / (1000 if durations[active].type_or_unit == \"ms\" else 1)\n p = characteristics.create_property(\"duration\", dur)\n p.unit = \"s\"\n p = characteristics.create_property(\"amplitude\", float(amplitudes[active].data[i]))\n p.unit = amplitudes[active].type_or_unit\n d = float(delays.data[i]) / (1000 if delays.type_or_unit == \"ms\" else 1)\n p = characteristics.create_property(\"delay\", d)\n p.unit = \"s\"\n at = float(abstimes.data[i]) / (1000 if abstimes.type_or_unit == \"ms\" else 1)\n p = characteristics.create_property(\"abs_time\", at)\n p.unit = \"s\"\n characteristics.create_property(\"repro_tag_id\", self._repro_tags[repro_name].id)\n if len(parameters) > 0:\n params = parse_parameter(parameters[active].data[i])\n for p in params:\n characteristics.append(p)\n stimuli.append(characteristics)\n return stimuli\n\n def stimuli(sampleinterval):\n stims = {}\n counter = {}\n stim_metadata = parse_stimulus_description(os.path.join(self._folder, \"stimulus-descriptions.dat\"))\n for rr in self._stimuli_dat.repro_runs:\n if rr is None or rr.name is None:\n print(rr)\n continue\n if rr.name in counter:\n counter[rr.name] += 1\n else:\n counter[rr.name] = 1\n if not rr.valid:\n continue\n if \"BaselineActivity\" in rr.name:\n continue # there are no stimulus presented during baseline\n repro_name = f\"{rr.name}_{counter[rr.name]}\"\n stims[repro_name] = stimulus_descriptions(repro_name, rr, sampleinterval)\n 
\n return stims, stim_metadata\n\n def store_stimuli(stims, stim_metadata):\n def store_features(signal, features):\n excluded_feats = [\"start_time\", \"duration\", \"signal\"]\n fixed_feats = [\"abs_time\", \"amplitude\", \"repro_tag_id\"]\n feats = {}\n for i, feat in enumerate(features):\n for p in feat:\n if p.name in excluded_feats:\n continue\n if p.name not in feats:\n if p.dtype == \"string\":\n feats[p.name] = np.empty(len(features), dtype=object)\n feats[p.name][i] = p.values[0]\n else:\n feats[p.name] = np.empty(len(features))\n else:\n feats[p.name][i] = p.values[0]\n for key in feats.keys():\n feat_name = f\"{signal}_{key}\"\n feat_type = f\"relacs.feature.{key if key in fixed_feats else 'mutable'}\"\n mtag = self._stimulus_mtags[signal]\n shape = (len(feats[key]), 1)\n data = np.reshape(feats[key], shape)\n dtype = nix.DataType.String if data.dtype == object else nix.DataType.Float\n feature_da = self._block.create_data_array(feat_name, feat_type, \n shape= shape, dtype=dtype,\n data=data)\n feature_da.append_set_dimension()\n mtag.create_feature(feature_da, nix.LinkType.Indexed)\n return None\n\n unique_signals = []\n signal_counts = {}\n signal_starts = {}\n signal_durations = {}\n signal_features = {}\n for repro_run in stims:\n for stim in stims[repro_run]:\n signal = stim.props[\"signal\"].values[0]\n if signal not in unique_signals:\n unique_signals.append(signal)\n signal_counts[signal] = 1\n signal_starts[signal] = [stim.props[\"start_time\"].values[0]]\n signal_durations[signal] = [stim.props[\"duration\"].values[0]]\n signal_features[signal] = [stim]\n else:\n signal_starts[signal].append(stim.props[\"start_time\"].values[0])\n signal_durations[signal].append(stim.props[\"duration\"].values[0])\n signal_counts[signal] += 1\n signal_features[signal].append(stim)\n\n excluded_refs = [\"restart\", \"recording\", \"stimulus\"]\n for signal in unique_signals:\n positions = self._block.create_data_array(f\"{signal}_onset_times\", \"relacs.stimulus.onset\",\n data=np.atleast_2d(signal_starts[signal]).T)\n positions.append_set_dimension()\n\n extents = self._block.create_data_array(f\"{signal}_durations\", \"relacs.stimulus.duration\",\n data=np.atleast_2d(signal_durations[signal]).T)\n extents.append_set_dimension()\n\n mtag = self._block.create_multi_tag(signal, \"relacs.stimulus.segment\", positions=positions, \n extents=extents)\n self._stimulus_mtags[signal] = mtag\n for et in self._event_data_arrays:\n if et not in excluded_refs:\n mtag.references.append(self._event_data_arrays[et])\n for rt in self._raw_data_arrays:\n mtag.references.append(self._raw_data_arrays[rt])\n\n if stim_metadata is not None and signal in stim_metadata.sections:\n metadata = stim_metadata[signal]\n mtag.metadata = self._nixfile.create_section(mtag.name, \"relacs.stimulus\")\n odml2nix(metadata, mtag.metadata)\n store_features(signal, signal_features[signal])\n\n return None\n\n sampleinterval = self._stimuli_dat.input_settings.props[\"sample interval1\"].values[0] /1000\n stims, metadata = stimuli(sampleinterval)\n store_stimuli(stims, metadata)\n\n return\n\n def convert_repro_runs(self):\n def repro_times(reprorun, sampleinterval):\n if reprorun.name is None:\n return None, None\n if not reprorun.valid:\n return None, None\n index_col = reprorun.table.find_column(1)\n if len(index_col) == 0:\n return None, None\n\n stimulus_grp = reprorun.table[\"stimulus\"]\n signals = stimulus_grp.columns_by_name(\"signal\")\n is_init = np.any(np.array([s[0] for s in signals], dtype=object) == 
\"init\")\n delay_cols = stimulus_grp.columns_by_name(\"delay\")\n delay = 0.0 if (len(delay_cols) == 0 or is_init) else delay_cols[0][0]\n start_time = index_col[0] * sampleinterval - delay / 1000.\n\n duration_cols = stimulus_grp.columns_by_name(\"duration\")\n duration = 0.0\n if \"BaselineActivity\" in reprorun.name:\n duration = 0.0\n end_time = start_time\n else:\n for d in duration_cols:\n dur = d[-1]\n if isinstance(dur, (int, float)):\n duration = dur / 1000\n break\n elif isinstance(dur, str) and only_number.search(dur) is not None:\n duration = float(dur) / 1000\n break\n end_time = index_col[-1] * sampleinterval + duration\n logging.debug(f\"Repro {reprorun.name} from {start_time} to {end_time}s\")\n return start_time, end_time\n\n def repro_runs():\n repro_names = []\n repro_starts = []\n repro_ends = []\n repro_durations = []\n repro_metadata = []\n sampleinterval = self._stimuli_dat.input_settings.props[\"sample interval1\"].values[0] /1000\n counter = {}\n for i, rr in enumerate(self._stimuli_dat.repro_runs):\n if rr.name in counter:\n counter[rr.name] += 1\n else:\n counter[rr.name] = 1\n \n if not rr.valid:\n continue\n start, end = repro_times(rr, sampleinterval)\n if start is None:\n logging.error(f\"RePro run: {rr.name} has no start/stop entries! It is ignored!\")\n continue\n \n repro_names.append(f\"{rr.name}_{counter[rr.name]}\")\n\n repro_starts.append(start)\n repro_durations.append(end - start)\n repro_ends.append(end)\n repro_metadata.append(rr.metadata)\n\n for i, (start, end , duration) in enumerate(zip(repro_starts, repro_ends, repro_durations)):\n logging.debug(f\"Duration {duration} for repro {repro_names[i]} and {i} < {len(repro_starts) - 1}\")\n if duration < sampleinterval and i < len(repro_starts) -1:\n repro_durations[i] = repro_starts[i+1] - start\n logging.debug(f\"\\t new duration: {repro_durations[i]}\")\n repro_ends[i] = repro_starts[i+1]\n\n return repro_names, repro_metadata, repro_starts, repro_durations\n\n def store_repro_runs(repro_names, repro_metadata, start_times, durations):\n excluded_refs = [\"restart\", \"recording\", \"stimulus\"]\n for name, metadata, start, duration in zip(repro_names, repro_metadata, start_times, durations):\n logging.debug(f\"... storing {name} which ran from {start} to {start + duration}.\")\n tag = self._block.create_tag(name, \"relacs.repro_run\", position=[start])\n tag.extent = [duration]\n for et in self._event_data_arrays:\n if et not in excluded_refs:\n tag.references.append(self._event_data_arrays[et])\n for rt in self._raw_data_arrays:\n tag.references.append(self._raw_data_arrays[rt])\n tag.metadata = self._nixfile.create_section(name, \"relacs.repro\")\n odml2nix(metadata, tag.metadata)\n self._repro_tags[name] = tag\n\n names, metadata, starts, durations = repro_runs()\n logging.info(\"Converting RePro runs...\")\n store_repro_runs(names, metadata, starts, durations)\n\n def convert(self):\n logging.info(f\"Converting dataset {self._folder} to nix file {self._output}!\")\n\n channel_config = self.read_channel_config()\n self.open_nix_file()\n self.convert_raw_traces(channel_config)\n self.convert_event_traces()\n\n self._stimuli_dat = StimuliDat(os.path.join(self._folder, \"stimuli.dat\"))\n self.convert_repro_runs()\n self.convert_stimuli()\n self._nixfile.close()\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.atleast_2d" ] ]
LinjianLi/Seq2Seq-PyTorch
[ "671bd10ac1a2620fb4d5ceaacdff9c0e9f4738a2" ]
[ "seq2seq/inputter/embedder.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n################################################################################\n#\n# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved\n#\n################################################################################\n\"\"\"\nFile: source/encoders/embedder.py\n\"\"\"\n\nimport logging\nimport torch\nimport torch.nn as nn\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Embedder(nn.Embedding):\n \"\"\"\n Embedder\n \"\"\"\n def load_embeddings(self, embeds, scale=0.05):\n \"\"\"\n load_embeddings\n \"\"\"\n assert len(embeds) == self.num_embeddings\n\n embeds = torch.tensor(embeds)\n num_known = 0\n for i in range(len(embeds)):\n # If no pretrained embedding for this token, randomly generate one.\n if len(embeds[i].nonzero()) == 0:\n nn.init.uniform_(embeds[i], -scale, scale)\n else:\n num_known += 1\n self.weight.data.copy_(embeds)\n logger.info(\"{} words have pretrained embeddings\"\n \" (coverage: {:.3f})\".format(\n num_known, num_known / self.num_embeddings))\n" ]
[ [ "torch.nn.init.uniform_", "torch.tensor" ] ]
hoelzl/ML-Course
[ "efa7ccb7c6583753675bbcda569d3184d1ca98d2" ]
[ "notebooks/nb074_ensembles.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.13.7\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] slideshow={\"slide_type\": \"slide\"}\n# # Ensembles\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeRegressor\n\nsns.set_theme()\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nrng = np.random.default_rng(42)\n\nx = rng.uniform(size=(150, 1), low=0.0, high=10.0)\nx_train, x_test = x[:100], x[100:]\n\nx_plot = np.linspace(0, 10, 500).reshape(-1, 1)\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef lin(x):\n return 0.85 * x - 1.5\n\n\n# %% slideshow={\"slide_type\": \"-\"}\ndef fun(x):\n return 2 * np.sin(x) + 0.1 * x ** 2 - 2\n\n\n# %% slideshow={\"slide_type\": \"-\"}\ndef randomize(fun, x, scale=0.5):\n return fun(x) + rng.normal(size=x.shape, scale=scale)\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef evaluate_non_random_regressor(reg_type, f_y, *args, **kwargs):\n reg = reg_type(*args, **kwargs)\n\n y_train = f_y(x_train).reshape(-1)\n y_test = f_y(x_test).reshape(-1)\n\n reg.fit(x_train, y_train)\n y_pred = reg.predict(x_test)\n\n x_plot = np.linspace(0, 10, 500).reshape(-1, 1)\n fig, ax = plt.subplots(figsize=(20, 8))\n sns.lineplot(x=x_plot[:, 0], y=reg.predict(x_plot), ax=ax)\n sns.lineplot(x=x_plot[:, 0], y=f_y(x_plot[:, 0]), ax=ax)\n sns.scatterplot(x=x_train[:, 0], y=y_train, ax=ax)\n plt.show()\n\n mae = mean_absolute_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n rmse = np.sqrt(mean_squared_error(y_test, y_pred))\n print(\n \"\\nNo randomness: \" f\"MAE = {mae:.2f}, MSE = {mse:.2f}, RMSE = {rmse:.2f}\"\n )\n\n return reg\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef plot_graphs(f_y, reg, reg_rand, reg_chaos, y_train, y_rand_train, y_chaos_train):\n x_plot = np.linspace(0, 10, 500).reshape(-1, 1)\n fig, ax = plt.subplots(figsize=(20, 12))\n sns.lineplot(x=x_plot[:, 0], y=reg.predict(x_plot), ax=ax)\n sns.scatterplot(x=x_train[:, 0], y=y_train, ax=ax)\n\n sns.lineplot(x=x_plot[:, 0], y=reg_rand.predict(x_plot), ax=ax)\n sns.scatterplot(x=x_train[:, 0], y=y_rand_train, ax=ax)\n\n sns.lineplot(x=x_plot[:, 0], y=reg_chaos.predict(x_plot), ax=ax)\n sns.scatterplot(x=x_train[:, 0], y=y_chaos_train, ax=ax)\n\n sns.lineplot(x=x_plot[:, 0], y=f_y(x_plot[:, 0]), ax=ax)\n plt.show() \n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef print_evaluation(y_test, y_pred, y_rand_test, y_rand_pred, y_chaos_test, y_chaos_pred):\n mae = mean_absolute_error(y_test, y_pred)\n mae_rand = mean_absolute_error(y_rand_test, y_rand_pred)\n mae_chaos = mean_absolute_error(y_chaos_test, y_chaos_pred)\n\n mse = mean_squared_error(y_test, y_pred)\n mse_rand = mean_squared_error(y_rand_test, y_rand_pred)\n mse_chaos = mean_squared_error(y_chaos_test, y_chaos_pred)\n\n rmse = np.sqrt(mean_squared_error(y_test, y_pred))\n rmse_rand = np.sqrt(mean_squared_error(y_rand_test, y_rand_pred))\n rmse_chaos = np.sqrt(mean_squared_error(y_chaos_test, y_chaos_pred))\n\n print(\n \"\\nNo randomness: \" f\"MAE = {mae:.2f}, MSE = {mse:.2f}, RMSE = {rmse:.2f}\"\n )\n print(\n \"Some randomness: \"\n f\"MAE = {mae_rand:.2f}, MSE = {mse_rand:.2f}, RMSE = {rmse_rand:.2f}\"\n )\n 
print(\n \"Lots of randomness: \"\n f\"MAE = {mae_chaos:.2f}, MSE = {mse_chaos:.2f}, RMSE = {rmse_chaos:.2f}\"\n )\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef evaluate_regressor(reg_type, f_y, *args, **kwargs):\n reg = reg_type(*args, **kwargs)\n reg_rand = reg_type(*args, **kwargs)\n reg_chaos = reg_type(*args, **kwargs)\n \n y_train = f_y(x_train).reshape(-1)\n y_test = f_y(x_test).reshape(-1)\n y_pred = reg.fit(x_train, y_train).predict(x_test)\n \n y_rand_train = randomize(f_y, x_train).reshape(-1)\n y_rand_test = randomize(f_y, x_test).reshape(-1)\n y_rand_pred = reg_rand.fit(x_train, y_rand_train).predict(x_test)\n\n y_chaos_train = randomize(f_y, x_train, 1.5).reshape(-1)\n y_chaos_test = randomize(f_y, x_test, 1.5).reshape(-1)\n y_chaos_pred = reg_chaos.fit(x_train, y_chaos_train).predict(x_test)\n\n plot_graphs(f_y, reg, reg_rand, reg_chaos, y_train, y_rand_train, y_chaos_train)\n print_evaluation(y_test, y_pred, y_rand_test, y_rand_pred, y_chaos_test, y_chaos_pred)\n\n\n# %% [markdown] slideshow={\"slide_type\": \"slide\"}\n# # Ensembles, Random Forests, Gradient Boosted Trees\n\n# %% [markdown] slideshow={\"slide_type\": \"slide\"}\n# ## Ensemble Methods\n#\n# Idea: combine several estimators to improve their overal performance.\n#\n# - Averaging methods: \n# - Independent estimators, average predictions\n# - Reduces variance (overfitting)\n# - Bagging, random forests\n# - Boosting methods:\n# - Train estimators sequentially\n# - Each estimator is trained to reduce the bias of its (combined) predecessors\n\n# %% [markdown] slideshow={\"slide_type\": \"subslide\"}\n# ### Bagging\n#\n# - Averaging method: build several estimators of the same type, average their results\n# - Needs some way to introduce differences between estimators\n# - Otherwise variance is not reduced\n# - Train on random subsets of the training data\n# - Reduce overfitting\n# - Work best with strong estimators (e.g., decision trees with (moderately) large depth)\n\n# %% [markdown]\n# ### Random Forests\n#\n# - Bagging classifier/regressor using decision trees\n# - For each tree in the forest:\n# - Subset of training data\n# - Subset of features\n# - Often significant reduction in variance (overfitting)\n# - Sometimes increase in bias\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfrom sklearn.ensemble import RandomForestRegressor\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_non_random_regressor(RandomForestRegressor, lin, random_state=42);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_non_random_regressor(RandomForestRegressor, fun, random_state=42);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_non_random_regressor(\n RandomForestRegressor, fun, n_estimators=25, criterion=\"absolute_error\", random_state=42\n);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(RandomForestRegressor, lin, random_state=42);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(\n RandomForestRegressor, lin, n_estimators=500, max_depth=3, random_state=42\n)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(\n RandomForestRegressor, lin, n_estimators=500, min_samples_leaf=6, random_state=42\n)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(RandomForestRegressor, fun, random_state=42)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(\n RandomForestRegressor,\n fun,\n n_estimators=1000,\n min_samples_leaf=6,\n random_state=43,\n n_jobs=-1,\n)\n\n# %% [markdown] 
slideshow={\"slide_type\": \"slide\"}\n# ## Gradient Boosted Trees\n#\n# - Boosting method for both regression and classification\n# - Requires differentiable loss function\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfrom sklearn.ensemble import GradientBoostingRegressor\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_non_random_regressor(GradientBoostingRegressor, lin);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_non_random_regressor(GradientBoostingRegressor, fun);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(GradientBoostingRegressor, lin);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(GradientBoostingRegressor, lin, n_estimators=200, learning_rate=0.05, loss=\"absolute_error\");\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(GradientBoostingRegressor, lin, n_estimators=500, learning_rate=0.01,\n loss=\"absolute_error\", subsample=0.1, random_state=46);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nevaluate_regressor(GradientBoostingRegressor, fun, n_estimators=500, learning_rate=0.01,\n loss=\"absolute_error\", subsample=0.1, random_state=44);\n\n# %% [markdown] slideshow={\"slide_type\": \"slide\"}\n# ### Multiple Features\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nnp.set_printoptions(precision=1)\n\n# %%\nx, y, coef = make_regression(n_samples=250, n_features=4, n_informative=1, coef=True, random_state=42)\nx.shape, y.shape, coef\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))\nfor i, ax in enumerate(axs.reshape(-1)):\n sns.scatterplot(x=x[:, i], y=y, ax=ax)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, coef=True, random_state=42)\nx.shape, y.shape, coef\n\n# %%\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4)\nx_train.shape, x_test.shape, y_train.shape, y_test.shape\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))\nfor i in range(2):\n sns.scatterplot(x=x[:, i], y=y, ax=axs[0, i]);\nfor i in range(2):\n sns.scatterplot(x=x[:, i + 6], y=y, ax=axs[1, i]);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nlr_clf = LinearRegression()\nlr_clf.fit(x_train, y_train)\ny_lr_pred = lr_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)\n\n# %%\nlr_clf.coef_.astype(np.int32), coef.astype(np.int32)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndt_clf = DecisionTreeRegressor()\ndt_clf.fit(x_train, y_train)\ny_dt_pred = dt_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nrf_clf = RandomForestRegressor()\nrf_clf.fit(x_train, y_train)\ny_rf_pred = rf_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ngb_clf = GradientBoostingRegressor()\ngb_clf.fit(x_train, y_train)\ny_gb_pred = gb_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, noise=100.0, coef=True, random_state=42)\nx.shape, y.shape, coef\n\n# %%\nx_train, x_test, y_train, y_test = 
train_test_split(x, y, test_size=0.4)\nx_train.shape, x_test.shape, y_train.shape, y_test.shape\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nlr_clf = LinearRegression()\nlr_clf.fit(x_train, y_train)\ny_lr_pred = lr_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndt_clf = DecisionTreeRegressor()\ndt_clf.fit(x_train, y_train)\ny_dt_pred = dt_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nrf_clf = RandomForestRegressor()\nrf_clf.fit(x_train, y_train)\ny_rf_pred = rf_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ngb_clf = GradientBoostingRegressor()\ngb_clf.fit(x_train, y_train)\ny_gb_pred = gb_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx, y, coef = make_regression(n_samples=250, n_features=20, n_informative=10, noise=100.0,\n coef=True, random_state=42)\ny += (20 * x[:, 1]) ** 2\nx.shape, y.shape, coef\n\n# %%\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4)\nx_train.shape, x_test.shape, y_train.shape, y_test.shape\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nfig, axs = plt.subplots(ncols=2, nrows=2, figsize=(20, 12))\nfor i in range(2):\n sns.scatterplot(x=x[:, i], y=y, ax=axs[0, i]);\nfor i in range(2):\n sns.scatterplot(x=x[:, i + 6], y=y, ax=axs[1, i]);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nlr_clf = LinearRegression()\nlr_clf.fit(x_train, y_train)\ny_lr_pred = lr_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_lr_pred), mean_squared_error(y_test, y_lr_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndt_clf = DecisionTreeRegressor()\ndt_clf.fit(x_train, y_train)\ny_dt_pred = dt_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_dt_pred), mean_squared_error(y_test, y_dt_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nrf_clf = RandomForestRegressor()\nrf_clf.fit(x_train, y_train)\ny_rf_pred = rf_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_rf_pred), mean_squared_error(y_test, y_rf_pred)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ngb_clf = GradientBoostingRegressor()\ngb_clf.fit(x_train, y_train)\ny_gb_pred = gb_clf.predict(x_test)\n\nmean_absolute_error(y_test, y_gb_pred), mean_squared_error(y_test, y_gb_pred)\n\n# %% [markdown] slideshow={\"slide_type\": \"slide\"}\n#\n# ## Feature Engineering\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx = rng.uniform(size=(150, 1), low=0.0, high=10.0)\nx_train, x_test = x[:100], x[100:]\nx_plot = np.linspace(0, 10, 500)\nx_train[:3]\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ny_lin_train = lin(x_train).reshape(-1)\ny_lin_test = lin(x_test).reshape(-1)\ny_fun_train = fun(x_train.reshape(-1))\ny_fun_test = fun(x_test).reshape(-1)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx_squares = x * x\nx_squares[:3]\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx_sins = np.sin(x)\nx_sins[:3]\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx_train_aug = np.concatenate([x_train, x_train * x_train, np.sin(x_train)], axis=1)\nx_train_aug[:3]\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx_test_aug = np.concatenate([x_test, x_test * x_test, np.sin(x_test)], axis=1)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\n# from sklearn.linear_model import 
Ridge\n# lr_aug_lin = Ridge()\nlr_aug_lin = LinearRegression()\nlr_aug_lin.fit(x_train_aug, y_lin_train);\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nlr_aug_lin.coef_, lr_aug_lin.intercept_\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ny_aug_lin_pred = lr_aug_lin.predict(x_test_aug)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nmean_absolute_error(y_lin_test, y_aug_lin_pred), mean_squared_error(\n y_lin_test, y_aug_lin_pred\n)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\nx_test.shape, x_plot.shape\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ndef train_and_plot_aug(f_y, scale=0.5):\n y_plot = f_y(x_plot)\n \n f_r = lambda x: randomize(f_y, x, scale=scale)\n y_train = f_r(x_train_aug[:, 0])\n y_test = f_r(x_test)\n \n lr_aug = LinearRegression() # Try with Ridge() as well...\n lr_aug.fit(x_train_aug, y_train)\n y_pred_test = lr_aug.predict(\n np.concatenate([x_test, x_test * x_test, np.sin(x_test)], axis=1)\n )\n x_plot2 = x_plot.reshape(-1, 1)\n y_pred_plot = lr_aug.predict(\n np.concatenate([x_plot2, x_plot2 * x_plot2, np.sin(x_plot2)], axis=1)\n )\n \n fig, ax = plt.subplots(figsize=(12, 6))\n sns.scatterplot(x=x_plot2[:, 0], y=y_plot, color=\"orange\")\n sns.scatterplot(x=x_plot2[:, 0], y=y_pred_plot, color=\"red\")\n sns.scatterplot(x=x_train_aug[:, 0], y=y_train, color=\"green\")\n plt.show()\n\n mae_in = mean_absolute_error(y_test, y_pred_test)\n mse_in = mean_squared_error(y_test, y_pred_test)\n rmse_in = np.sqrt(mse_in)\n\n y_nr = f_y(x_test)\n mae_true = mean_absolute_error(y_nr, y_pred_test)\n mse_true = mean_squared_error(y_nr, y_pred_test)\n rmse_true = np.sqrt(mse_true)\n\n print(f\"Vs. input: MAE: {mae_in:.2f}, MSE: {mse_in:.2f}, RMSE: {rmse_in:.2f}\")\n print(f\"True: MAE: {mae_true:.2f}, MSE: {mse_true:.2f}, RMSE: {rmse_true:.2f}\")\n print(f\"Parameters: {lr_aug.coef_}, {lr_aug.intercept_}\")\n\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ntrain_and_plot_aug(lin)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ntrain_and_plot_aug(fun, scale=0.0)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ntrain_and_plot_aug(fun, scale=0.5)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ntrain_and_plot_aug(fun, scale=1.5)\n\n# %% slideshow={\"slide_type\": \"subslide\"}\ntrain_and_plot_aug(fun, scale=3)\n\n\n# %%\ndef fun2(x): return 2.8 * np.sin(x) + 0.3 * x + 0.08 * x ** 2 - 2.5\n\ntrain_and_plot_aug(fun2, scale=1.5)\n\n# %%\ntrain_and_plot_aug(lambda x: np.select([x<=6, x>6], [-0.5, 3.5]))\n\n# %%\n" ]
[ [ "numpy.random.default_rng", "sklearn.metrics.mean_squared_error", "sklearn.metrics.mean_absolute_error", "sklearn.linear_model.LinearRegression", "numpy.set_printoptions", "sklearn.tree.DecisionTreeRegressor", "matplotlib.pyplot.subplots", "sklearn.datasets.make_regression", "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.show", "numpy.select", "numpy.sqrt", "numpy.sin", "numpy.linspace", "sklearn.model_selection.train_test_split", "sklearn.ensemble.GradientBoostingRegressor" ] ]
MayankR/cmr
[ "6c898a5294954899334d430ec71e0a0692a0d99e" ]
[ "nnutils/loss_utils.py" ]
[ "\"\"\"\nLoss Utils.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nfrom . import geom_utils\nimport numpy as np\n\ndef mask_dt_loss(proj_verts, dist_transf):\n \"\"\"\n proj_verts: B x N x 2\n (In normalized coordinate [-1, 1])\n dist_transf: B x 1 x N x N\n\n Computes the distance transform at the points where vertices land.\n \"\"\"\n # Reshape into B x 1 x N x 2\n sample_grid = proj_verts.unsqueeze(1)\n # B x 1 x 1 x N\n dist_transf = torch.nn.functional.grid_sample(dist_transf, sample_grid, padding_mode='border')\n return dist_transf.mean()\n\n\ndef texture_dt_loss(texture_flow, dist_transf, vis_rend=None, cams=None, verts=None, tex_pred=None):\n \"\"\"\n texture_flow: B x F x T x T x 2\n (In normalized coordinate [-1, 1])\n dist_transf: B x 1 x N x N\n\n Similar to geom_utils.sample_textures\n But instead of sampling image, it samples dt values.\n \"\"\"\n # Reshape into B x F x T*T x 2\n T = texture_flow.size(-2)\n F = texture_flow.size(1)\n flow_grid = texture_flow.view(-1, F, T * T, 2)\n # B x 1 x F x T*T\n dist_transf = torch.nn.functional.grid_sample(dist_transf, flow_grid)\n\n if vis_rend is not None:\n # Visualize the error!\n # B x 3 x F x T*T\n dts = dist_transf.repeat(1, 3, 1, 1)\n # B x 3 x F x T x T\n dts = dts.view(-1, 3, F, T, T)\n # B x F x T x T x 3\n dts = dts.permute(0, 2, 3, 4, 1)\n dts = dts.unsqueeze(4).repeat(1, 1, 1, 1, T, 1) / dts.max()\n\n from ..utils import bird_vis\n for i in range(dist_transf.size(0)):\n rend_dt = vis_rend(verts[i], cams[i], dts[i])\n rend_img = bird_vis.tensor2im(tex_pred[i].data) \n import matplotlib.pyplot as plt\n plt.ion()\n fig=plt.figure(1)\n plt.clf()\n ax = fig.add_subplot(121)\n ax.imshow(rend_dt)\n ax = fig.add_subplot(122)\n ax.imshow(rend_img)\n import ipdb; ipdb.set_trace()\n\n return dist_transf.mean()\n\n\ndef texture_loss(img_pred, img_gt, mask_pred, mask_gt):\n \"\"\"\n Input:\n img_pred, img_gt: B x 3 x H x W\n mask_pred, mask_gt: B x H x W\n \"\"\"\n mask_pred = mask_pred.unsqueeze(1)\n mask_gt = mask_gt.unsqueeze(1)\n\n # masked_rend = (img_pred * mask)[0].data.cpu().numpy()\n # masked_gt = (img_gt * mask)[0].data.cpu().numpy()\n # import matplotlib.pyplot as plt\n # plt.ion()\n # plt.figure(1)\n # plt.clf()\n # fig = plt.figure(1)\n # ax = fig.add_subplot(121)\n # ax.imshow(np.transpose(masked_rend, (1, 2, 0)))\n # ax = fig.add_subplot(122)\n # ax.imshow(np.transpose(masked_gt, (1, 2, 0)))\n # import ipdb; ipdb.set_trace()\n\n return torch.nn.L1Loss()(img_pred * mask_pred, img_gt * mask_gt)\n\n\ndef camera_loss(cam_pred, cam_gt, margin):\n \"\"\"\n cam_* are B x 7, [sc, tx, ty, quat]\n Losses are in similar magnitude so one margin is ok.\n \"\"\"\n # CH: camera loss always 0\n return 0\n\n # CH: comment out old loss code\n# rot_pred = cam_pred[:, -4:]\n# rot_gt = cam_gt[:, -4:]\n\n# rot_loss = hinge_loss(quat_loss_geodesic(rot_pred, rot_gt), margin)\n# # Scale and trans.\n# st_loss = (cam_pred[:, :3] - cam_gt[:, :3])**2\n# st_loss = hinge_loss(st_loss.view(-1), margin)\n\n# return rot_loss.mean() + st_loss.mean()\n\ndef hinge_loss(loss, margin):\n # Only penalize if loss > margin\n zeros = torch.autograd.Variable(torch.zeros(1).cuda(), requires_grad=False)\n return torch.max(loss - margin, zeros)\n\n\ndef quat_loss_geodesic(q1, q2):\n '''\n Geodesic rotation loss.\n \n Args:\n q1: N X 4\n q2: N X 4\n Returns:\n loss : N x 1\n '''\n q1 = torch.unsqueeze(q1, 1)\n q2 = torch.unsqueeze(q2, 1)\n q2_conj = torch.cat([ q2[:, :, 
[0]] , -1*q2[:, :, 1:4] ], dim=-1)\n q_rel = geom_utils.hamilton_product(q1, q2_conj)\n q_loss = 1 - torch.abs(q_rel[:, :, 0])\n # we can also return q_loss*q_loss\n return q_loss\n \n\ndef quat_loss(q1, q2):\n '''\n Anti-podal squared L2 loss.\n \n Args:\n q1: N X 4\n q2: N X 4\n Returns:\n loss : N x 1\n '''\n q_diff_loss = (q1-q2).pow(2).sum(1)\n q_sum_loss = (q1+q2).pow(2).sum(1)\n q_loss, _ = torch.stack((q_diff_loss, q_sum_loss), dim=1).min(1)\n return q_loss\n\n\ndef triangle_loss(verts, edge2verts):\n \"\"\"\n Encourages dihedral angle to be 180 degrees.\n\n Args:\n verts: B X N X 3\n edge2verts: B X E X 4\n Returns:\n loss : scalar\n \"\"\"\n indices_repeat = torch.stack([edge2verts, edge2verts, edge2verts], dim=2) # B X E X 3 X 4\n\n verts_A = torch.gather(verts, 1, indices_repeat[:, :, :, 0])\n verts_B = torch.gather(verts, 1, indices_repeat[:, :, :, 1])\n verts_C = torch.gather(verts, 1, indices_repeat[:, :, :, 2])\n verts_D = torch.gather(verts, 1, indices_repeat[:, :, :, 3])\n\n # n1 = cross(ad, ab)\n # n2 = cross(ab, ac)\n n1 = geom_utils.cross_product(verts_D - verts_A, verts_B - verts_A)\n n2 = geom_utils.cross_product(verts_B - verts_A, verts_C - verts_A)\n\n n1 = torch.nn.functional.normalize(n1, dim=2)\n n2 = torch.nn.functional.normalize(n2, dim=2)\n\n dot_p = (n1 * n2).sum(2)\n loss = ((1 - dot_p)**2).mean()\n return loss\n\n\ndef deform_l2reg(V):\n \"\"\"\n l2 norm on V = B x N x 3\n \"\"\"\n V = V.view(-1, V.size(2))\n return torch.mean(torch.norm(V, p=2, dim=1))\n\n\ndef entropy_loss(A):\n \"\"\"\n Input is K x N\n Each column is a prob of vertices being the one for k-th keypoint.\n We want this to be sparse = low entropy.\n \"\"\"\n entropy = -torch.sum(A * torch.log(A), 1)\n # Return avg entropy over \n return torch.mean(entropy)\n\n\ndef kp_l2_loss(kp_pred, kp_gt):\n \"\"\"\n L2 loss between visible keypoints.\n\n \\Sum_i [0.5 * vis[i] * (kp_gt[i] - kp_pred[i])^2] / (|vis|)\n \"\"\"\n # CH: Do not consider keypoint loss\n return 0\n\n# criterion = torch.nn.MSELoss()\n\n# vis = (kp_gt[:, :, 2, None] > 0).float()\n\n# # This always has to be (output, target), not (target, output)\n# return criterion(vis * kp_pred, vis * kp_gt[:, :, :2])\n\n\ndef lsgan_loss(score_real, score_fake):\n \"\"\"\n DELETE ME.\n Label 0=fake, 1=real.\n score_real is B x 1, score for real samples\n score_fake is B x 1, score for fake samples\n\n Returns loss for discriminator and encoder.\n \"\"\"\n\n disc_loss_real = torch.mean((score_real - 1)**2)\n disc_loss_fake = torch.mean((score_fake)**2)\n disc_loss = disc_loss_real + disc_loss_fake\n\n enc_loss = torch.mean((score_fake - 1)**2)\n\n return disc_loss, enc_loss\n\n\nclass EdgeLoss(object):\n \"\"\"\n Edge length should not diverge from the original edge length.\n\n On initialization computes the current edge lengths.\n \"\"\"\n def __init__(self, verts, edges2verts, margin=2, use_bad_edge=False, use_l2=False):\n # Input:\n # verts: B x N x 3\n # edeges2verts: B x E x 4\n # (only using the first 2 columns)\n self.use_l2 = use_l2\n\n # B x E x 2\n edge_verts = edges2verts[:, :, :2]\n self.indices = torch.stack([edge_verts, edge_verts, edge_verts], dim=2)\n V_copy = torch.autograd.Variable(verts.data, requires_grad=False)\n if V_copy.dim() == 2:\n # N x 3 (mean shape) -> B x N x 3\n V_copy = V_copy.unsqueeze(0).repeat(edges2verts.size(0), 1, 1)\n\n if use_bad_edge:\n self.log_e0 = torch.log(self.compute_edgelength(V_copy))\n else:\n # e0 is the mean over all edge lengths!\n e0 = self.compute_edgelength(V_copy).mean(1).view(-1, 1)\n 
self.log_e0 = torch.log(e0)\n\n self.margin = np.log(margin)\n self.zeros = torch.autograd.Variable(torch.zeros(1).cuda(), requires_grad=False)\n\n # For visualization\n self.v1 = edges2verts[0, :, 0].data.cpu().numpy()\n self.v2 = edges2verts[0, :, 1].data.cpu().numpy()\n\n def __call__(self, verts):\n e1 = self.compute_edgelength(verts)\n if self.use_l2:\n dist = (torch.log(e1) - self.log_e0)**2\n self.dist = torch.max(dist - self.margin**2, self.zeros)\n else:\n dist = torch.abs(torch.log(e1) - self.log_e0)\n self.dist = torch.max(dist - self.margin, self.zeros)\n return self.dist.mean()\n\n def compute_edgelength(self, V):\n v1 = torch.gather(V, 1, self.indices[:, :, :, 0])\n v2 = torch.gather(V, 1, self.indices[:, :, :, 1])\n\n elengths = torch.sqrt(((v1 - v2)**2).sum(2))\n\n # B x E\n return elengths\n\n def visualize(self, verts, F_np, mv=None):\n from psbody.mesh import Mesh\n\n V = verts[0].data.cpu().numpy()\n mesh = Mesh(V, F_np)\n dist = self.dist[0].data.cpu().numpy()\n\n v_weights = np.zeros((V.shape[0]))\n for e_id, (v1_id, v2_id) in enumerate(zip(self.v1, self.v2)):\n v_weights[v1_id] += dist[e_id]\n v_weights[v2_id] += dist[e_id]\n\n mesh.set_vertex_colors_from_weights(v_weights)\n\n if mv is not None:\n mv.set_dynamic_meshes([mesh])\n else:\n mesh.show()\n import ipdb; ipdb.set_trace()\n\n\nclass LaplacianLoss(object):\n \"\"\"\n Encourages minimal mean curvature shapes.\n \"\"\"\n def __init__(self, faces):\n # Input:\n # faces: B x F x 3\n from ..nnutils.laplacian import Laplacian\n # V x V\n self.laplacian = Laplacian(faces)\n self.Lx = None\n\n def __call__(self, verts):\n self.Lx = self.laplacian(verts)\n # Reshape to BV x 3\n Lx = self.Lx.view(-1, self.Lx.size(2))\n loss = torch.norm(Lx, p=2, dim=1).mean()\n return loss\n\n def visualize(self, verts, mv=None):\n # Visualizes the laplacian.\n # Verts is B x N x 3 Variable\n Lx = self.Lx[0].data.cpu().numpy()\n\n V = verts[0].data.cpu().numpy()\n\n from psbody.mesh import Mesh\n F = self.laplacian.F_np[0]\n mesh = Mesh(V, F)\n\n weights = np.linalg.norm(Lx, axis=1)\n mesh.set_vertex_colors_from_weights(weights)\n\n if mv is not None:\n mv.set_dynamic_meshes([mesh])\n else:\n mesh.show()\n import ipdb; ipdb.set_trace()\n\n\nclass PerceptualTextureLoss(object):\n def __init__(self):\n from ..nnutils.perceptual_loss import PerceptualLoss\n self.perceptual_loss = PerceptualLoss()\n\n def __call__(self, img_pred, img_gt, mask_pred, mask_gt):\n \"\"\"\n Input:\n img_pred, img_gt: B x 3 x H x W\n mask_pred, mask_gt: B x H x W\n \"\"\"\n mask_pred = mask_pred.unsqueeze(1)\n mask_gt = mask_gt.unsqueeze(1)\n # masked_rend = (img_pred * mask_pred)[0].data.cpu().numpy()\n # masked_gt = (img_gt * mask_gt)[0].data.cpu().numpy()\n # import matplotlib.pyplot as plt\n # plt.ion()\n # plt.figure(1)\n # plt.clf()\n # fig = plt.figure(1)\n # ax = fig.add_subplot(121)\n # ax.imshow(np.transpose(masked_rend, (1, 2, 0)))\n # ax = fig.add_subplot(122)\n # ax.imshow(np.transpose(masked_gt, (1, 2, 0)))\n # import ipdb; ipdb.set_trace()\n\n # Only use mask_gt..\n dist = self.perceptual_loss(img_pred * mask_gt, img_gt * mask_gt)\n return dist.mean()\n" ]
[ [ "torch.stack", "torch.nn.L1Loss", "numpy.log", "torch.log", "torch.nn.functional.grid_sample", "torch.max", "torch.cat", "matplotlib.pyplot.figure", "torch.autograd.Variable", "torch.gather", "torch.norm", "torch.mean", "torch.unsqueeze", "numpy.zeros", "torch.nn.functional.normalize", "matplotlib.pyplot.clf", "matplotlib.pyplot.ion", "numpy.linalg.norm", "torch.abs", "torch.zeros" ] ]
jlubo/memory-consolidation-stc
[ "f9934760e12de324360297d7fc7902623169cb4d" ]
[ "analysis/averageWeights.py" ]
[ "######################################################\n### Averages early- and late-phase weight matrices ###\n######################################################\n\n### Copyright 2019-2021 Jannik Luboeinski\n### licensed under Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport numpy as np\nfrom pathlib import Path\n\nnp.set_printoptions(threshold=1e10, linewidth=200) # extend console print range for numpy arrays\n\n# readWeightMatrixData\n# Reads complete weight matrix data from a file (modified from plotFunctions.py)\n# filename: name of the file to read the data from\n# Nl: number of neurons in one row/column\n# return: the adjacency matrix, the early-phase weight matrix, the late-phase weight matrix, the firing rate vector\ndef readWeightMatrixData(filename, Nl):\n\n\t# read weight matrices and firing rates from file\n\ttry:\n\t\twith open(filename) as f:\n\t\t\trawdata = f.read()\n\texcept OSError:\n\t\traise\n\n\trawdata = rawdata.split('\\n\\n')\n\trawmatrix_h = rawdata[0].split('\\n')\n\trawmatrix_z = rawdata[1].split('\\n')\n\trawmatrix_v = rawdata[2].split('\\n')\n\n\trows = len(rawmatrix_v)\n\n\tif (rows != len(rawmatrix_v[0].split('\\t\\t'))) or (rows != Nl):\n\t\traise ValueError(str(rows) + ' instead of ' + str(Nl) + ' lines in data file \"' + filename + '\"')\n\t\tf.close()\n\t\texit()\n\n\tv = np.zeros((Nl,Nl))\n\th = np.zeros((Nl**2,Nl**2))\n\tz = np.zeros((Nl**2,Nl**2))\n\n\tfor i in range(Nl**2):\n\t\tif i < Nl:\n\t\t\tvalue0 = rawmatrix_v[i].split('\\t\\t')\n\t\tvalue1 = rawmatrix_h[i].split('\\t\\t')\n\t\tvalue2 = rawmatrix_z[i].split('\\t\\t')\n\n\t\tfor j in range(Nl**2):\n\t\t\tif i < Nl and j < Nl:\n\t\t\t\tv[i][j] = float(value0[j])\n\t\t\th[i][j] = float(value1[j])\n\t\t\tz[i][j] = float(value2[j])\n\n\tf.close()\n\tconnections = (h > 0)\n\n\treturn connections, h, z, v\n\n# averageWeights\n# Averages early- and late-phase weight matrices over all trials that are available\n# (trial data must be given as *_net_* file in a given directory)\n# nppath: path to the directory to read the data from\n# Nl: the number of excitatory neurons in one line of a quadratic grid\n# time: the time that at which the weights shall be read out\ndef averageWeights(nppath, Nl, time):\n\n\n\tc = np.zeros((Nl**2,Nl**2))\n\th = np.zeros((Nl**2,Nl**2))\n\tz = np.zeros((Nl**2,Nl**2))\n\tv = np.zeros((Nl,Nl))\n\n\tcounter = 0\n\n\t# Looking for data files (\"*_net_*\")\n\trawpaths = Path(nppath)\n\n\tfor x in rawpaths.iterdir():\n\n\t\tpath = str(x)\n\n\t\tif (\"_net_\" + time + \".txt\") in path:\n\n\t\t\ttry:\n\t\t\t\tctmp, htmp, ztmp, vtmp = readWeightMatrixData(path, Nl)\n\t\t\texcept ValueError:\n\t\t\t\traise\n\t\t\texcept OSError:\n\t\t\t\traise\n\t\t\texcept:\n\t\t\t\tprint(\"Error in \" + path)\n\t\t\t\texit()\n\n\t\t\tc = c + ctmp\n\t\t\th = h + htmp\n\t\t\tz = z + ztmp\n\t\t\tv = v + vtmp\n\n\t\t\tcounter += 1\n\n\n\tprint(\"Averaged over \" + str(counter) + \" trials for t = \" + str(time) + \" s.\")\n\tc /= counter\n\th /= counter\n\tz /= counter\n\tv /= counter\n\n\t# write averaged _net_ file containing averaged early-/late-phase weights and firing rates\n\tf = open('net_' + time + '_averaged.txt','wb')\n\tnp.savetxt(f, h, fmt='%.6f', delimiter='\\t\\t')\n\tf.write(b'\\x0a')\n\tnp.savetxt(f, z, fmt='%.6f', delimiter='\\t\\t')\n\tf.write(b'\\x0a')\n\tnp.savetxt(f, v, fmt='%.6f', delimiter='\\t\\t')\n\tf.write(b'\\x0a\\x0a')\n\tf.close()\n\n\t# write averaged connectivity matrix (just for sanity check)\n\t#f = open('conn_' + time + 
'_averaged.txt','wb')\n\t#np.savetxt(f, c, fmt='%.0f', delimiter=' ')\n\t#f.close()\n\n#averageWeights(\".\", 40, \"10.0\")\naverageWeights(\".\", 40, \"20.0\")\n#averageWeights(\".\", 40, \"28810.0\")\n" ]
[ [ "numpy.savetxt", "numpy.set_printoptions", "numpy.zeros" ] ]
tgisaturday/PPAP
[ "bac203b2c98ab9dec7b96ec44fb61cd2d778ab22" ]
[ "image_PPAP/train_ppap.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom utils.utils import *\nfrom model import *\nimport sys\nimport os\nimport math\nimport time\nfrom utils.data_helper import data_loader\nfrom model import xavier_init, he_normal_init\n\ndataset = sys.argv[1]\nmodel_name = sys.argv[2]\nprev_iter = int(sys.argv[3])\n\nmb_size, X_dim, width, height, channels,len_x_train, x_train, len_x_test, x_test = data_loader(dataset)\n \n \ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n sess = tf.Session(config=session_conf)\n\n with sess.as_default():\n #input placeholder\n input_shape=[None, width, height, channels]\n filter_sizes=[5, 5, 5, 5, 5] \n hidden = 128\n z_dim = 128 \n\n if dataset == 'celebA' or dataset == 'lsun': \n n_filters=[channels, hidden, hidden*2, hidden*4, hidden*8]\n else: \n n_filters=[channels, hidden, hidden*2, hidden*4]\n \n X = tf.placeholder(tf.float32, shape=[None, width, height,channels])\n A_true_flat = X\n \n #autoencoder variables\n var_G = []\n var_H = []\n var_A = []\n #discriminator variables\n W1 = tf.Variable(he_normal_init([5,5,channels, hidden//2]))\n W2 = tf.Variable(he_normal_init([5,5, hidden//2,hidden]))\n W3 = tf.Variable(he_normal_init([5,5,hidden,hidden*2]))\n if dataset == 'celebA' or dataset == 'lsun':\n W4 = tf.Variable(he_normal_init([5,5,hidden*2,hidden*4]))\n W5 = tf.Variable(xavier_init([4*4*hidden*4, 1]))\n b5 = tf.Variable(tf.zeros(shape=[1]))\n var_D = [W1,W2,W3,W4,W5,b5] \n else:\n W4 = tf.Variable(xavier_init([4*4*hidden*2, 1]))\n b4 = tf.Variable(tf.zeros(shape=[1]))\n var_D = [W1,W2,W3,W4,b4] \n \n global_step = tf.Variable(0, name=\"global_step\", trainable=False) \n\n G_sample, latent_z = ppap_autoencoder(input_shape, n_filters, filter_sizes,z_dim, A_true_flat,var_A, var_G)\n G_hacked = hacker(input_shape, n_filters, filter_sizes,z_dim, G_sample, var_H)\n \n D_real_logits = discriminator(A_true_flat, var_D)\n D_fake_logits = discriminator(G_sample, var_D)\n \n gp = gradient_penalty(G_sample, A_true_flat, mb_size,var_D)\n D_loss = tf.reduce_mean(D_fake_logits) - tf.reduce_mean(D_real_logits) +10.0*gp \n\n privacy_gain = tf.reduce_mean(tf.pow(A_true_flat - G_hacked,2)) \n A_loss = tf.reduce_mean(tf.pow(A_true_flat - G_sample,2)) \n G_loss = -tf.reduce_mean(D_fake_logits) - privacy_gain\n H_loss = privacy_gain \n \n latent_max = tf.reduce_max(latent_z, axis = 0)\n latent_min = tf.reduce_min(latent_z, axis = 0)\n tf.summary.image('Original',A_true_flat) \n tf.summary.image('fake',G_sample)\n tf.summary.image('decoded_from_fake',G_hacked)\n tf.summary.scalar('D_loss', D_loss) \n tf.summary.scalar('G_loss',-tf.reduce_mean(D_fake_logits))\n tf.summary.scalar('A_loss', A_loss)\n tf.summary.scalar('privacy_gain',privacy_gain)\n merged = tf.summary.merge_all()\n\n num_batches_per_epoch = int((len_x_train-1)/mb_size) + 1\n \n A_solver = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5, beta2=0.9).minimize(A_loss,var_list=var_A, global_step=global_step) \n D_solver = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5, beta2=0.9).minimize(D_loss,var_list=var_D, global_step=global_step)\n G_solver = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5, beta2=0.9).minimize(G_loss,var_list=var_G, global_step=global_step)\n H_solver = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5, beta2=0.9).minimize(H_loss,var_list=var_H, global_step=global_step)\n \n timestamp = str(int(time.time()))\n if not os.path.exists('results/PPAP/'):\n os.makedirs('results/PPAP/') \n 
out_dir = os.path.abspath(os.path.join(os.path.curdir, \"results/PPAP/models/{}_\".format(dataset) +model_name))\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists('results/PPAP/models/'):\n os.makedirs('results/PPAP/models/')\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables())\n if not os.path.exists('results/PPAP/dc_out_{}/'.format(dataset)):\n os.makedirs('results/PPAP/dc_out_{}/'.format(dataset)) \n\n train_writer = tf.summary.FileWriter('results/graphs/PPAP/{}'.format(dataset),sess.graph)\n saver = tf.train.Saver(tf.global_variables())\n sess.run(tf.global_variables_initializer())\n if prev_iter != 0:\n saver.restore(sess,tf.train.latest_checkpoint(checkpoint_dir)) \n i = prev_iter \n if prev_iter == 0:\n for idx in range(num_batches_per_epoch*10):\n if dataset == 'mnist':\n X_mb, _ = x_train.train.next_batch(mb_size)\n X_mb = np.reshape(X_mb,[-1,28,28,1])\n elif dataset == 'lsun':\n X_mb = x_train.next_batch(mb_size) \n else:\n X_mb = next_batch(mb_size, x_train) \n summary,_, A_loss_curr= sess.run([merged, A_solver, A_loss],feed_dict={X: X_mb})\n current_step = tf.train.global_step(sess, global_step)\n train_writer.add_summary(summary,current_step)\n if idx % 100 == 0:\n print('Iter: {}; A_loss: {:.4};'.format(idx,A_loss_curr))\n if idx % 1000 == 0: \n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print('Saved model at {} at step {}'.format(path, current_step))\n for idx in range(num_batches_per_epoch):\n if dataset == 'mnist':\n X_mb, _ = x_train.train.next_batch(mb_size)\n X_mb = np.reshape(X_mb,[-1,28,28,1])\n elif dataset == 'lsun':\n X_mb = x_train.next_batch(mb_size) \n else:\n X_mb = next_batch(mb_size, x_train) \n max_curr, min_curr = sess.run([latent_max,latent_min], feed_dict ={X: X_mb})\n if idx == 0:\n z_max = max_curr\n z_min = min_curr\n else:\n z_max = np.maximum(z_max,max_curr)\n z_min = np.minimum(z_min,min_curr)\n z_sensitivity = np.abs(np.subtract(z_max,z_min))\n print(\"Approximated Global Sensitivity:\")\n print(z_sensitivity)\n if prev_iter == 0: \n sess.run(tf.variables_initializer(var_list=var_G))\n for it in range(num_batches_per_epoch*1000):\n for _ in range(5):\n if dataset == 'mnist':\n X_mb, _ = x_train.train.next_batch(mb_size)\n X_mb = np.reshape(X_mb,[-1,28,28,1])\n elif dataset == 'lsun':\n X_mb = x_train.next_batch(mb_size) \n else:\n X_mb = next_batch(mb_size, x_train)\n _, D_loss_curr = sess.run([D_solver, D_loss],feed_dict={X: X_mb}) \n \n _, H_loss_curr = sess.run([H_solver, H_loss],feed_dict={X: X_mb}) \n summary,_,G_loss_curr = sess.run([merged, G_solver, G_loss],feed_dict={X: X_mb})\n current_step = tf.train.global_step(sess, global_step)\n train_writer.add_summary(summary,current_step)\n \n if it % 100 == 0:\n print('Iter: {}; D_loss: {:.4}; G_loss: {:.4}; privacy_gain: {:.4};'.format(it,D_loss_curr, G_loss_curr,H_loss_curr))\n\n if it % 1000 == 0: \n Xt_mb = x_test[:mb_size]\n G_sample_curr,re_fake_curr = sess.run([G_sample, G_hacked], feed_dict={X: Xt_mb})\n samples_flat = tf.reshape(G_sample_curr,[-1,width,height,channels]).eval()\n img_set = np.append(Xt_mb[:256], samples_flat[:256], axis=0) \n samples_flat = tf.reshape(re_fake_curr,[-1,width,height,channels]).eval() \n img_set = np.append(img_set, samples_flat[:256], axis=0) \n\n fig = plot(img_set, width, height, channels)\n 
plt.savefig('results/PPAP/dc_out_{}/{}.png'.format(dataset,str(i).zfill(3)), bbox_inches='tight')\n plt.close(fig)\n i += 1\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print('Saved model at {} at step {}'.format(path, current_step))\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.summary.image", "numpy.subtract", "tensorflow.reshape", "tensorflow.Variable", "tensorflow.train.global_step", "numpy.append", "tensorflow.global_variables_initializer", "numpy.reshape", "tensorflow.Graph", "tensorflow.reduce_min", "tensorflow.variables_initializer", "numpy.minimum", "tensorflow.pow", "tensorflow.global_variables", "tensorflow.Session", "numpy.maximum", "tensorflow.ConfigProto", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.summary.merge_all", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.train.latest_checkpoint" ] ]
Chutlhu/python_kickstart
[ "3fa7ee6830fa8c99b7e9887206d7fcda7361d292" ]
[ "src/models/train_model.py" ]
[ "from sklearn import svm\n\ndef train(data, target, C, gamma):\n clf = svm.SVC(C, 'rbf', gamma=gamma)\n clf.fit(data[:90],\n target[:90])\n return clf\n" ]
[ [ "sklearn.svm.SVC" ] ]
THUDM/cogdl
[ "37359d559ae4f9f2c0c34d851abaa0a0950d120a" ]
[ "cogdl/models/nn/agc.py" ]
[ "import torch\nimport numpy as np\nfrom sklearn.cluster import SpectralClustering\n\nfrom cogdl.utils import spmm\nfrom .. import BaseModel, register_model\n\n\n@register_model(\"agc\")\nclass AGC(BaseModel):\n r\"\"\"The AGC model from the `\"Attributed Graph Clustering via Adaptive Graph Convolution\"\n <https://arxiv.org/abs/1906.01210>`_ paper\n\n Args:\n num_clusters (int) : Number of clusters.\n max_iter (int) : Max iteration to increase k\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n # fmt: off\n parser.add_argument(\"--num-clusters\", type=int, default=7)\n parser.add_argument(\"--max-iter\", type=int, default=10)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(args.num_clusters, args.max_iter, args.cpu)\n\n def __init__(self, num_clusters, max_iter, cpu):\n super(AGC, self).__init__()\n\n self.num_clusters = num_clusters\n self.max_iter = max_iter\n\n self.device = \"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\"\n\n def forward(self, data):\n data = data.to(self.device)\n self.num_nodes = data.x.shape[0]\n graph = data\n graph.add_remaining_self_loops()\n\n graph.sym_norm()\n graph.edge_weight = data.edge_weight * 0.5\n\n pre_intra = 1e27\n pre_feat = None\n for t in range(1, self.max_iter + 1):\n x = data.x\n for i in range(t):\n x = spmm(graph, x)\n k = torch.mm(x, x.t())\n w = (torch.abs(k) + torch.abs(k.t())) / 2\n clustering = SpectralClustering(\n n_clusters=self.num_clusters, assign_labels=\"discretize\", random_state=0\n ).fit(w.detach().cpu())\n clusters = clustering.labels_\n intra = self.compute_intra(x.cpu().numpy(), clusters)\n print(\"iter #%d, intra = %.4lf\" % (t, intra))\n if intra > pre_intra:\n features_matrix = pre_feat\n return features_matrix\n pre_intra = intra\n pre_feat = w\n features_matrix = w\n return features_matrix.cpu()\n\n def compute_intra(self, x, clusters):\n num_nodes = x.shape[0]\n intra = np.zeros(self.num_clusters)\n num_per_cluster = np.zeros(self.num_clusters)\n for i in range(num_nodes):\n for j in range(i + 1, num_nodes):\n if clusters[i] == clusters[j]:\n intra[clusters[i]] += np.sum((x[i] - x[j]) ** 2) ** 0.5\n num_per_cluster[clusters[i]] += 1\n intra = np.array(list(filter(lambda x: x > 0, intra)))\n num_per_cluster = np.array(list(filter(lambda x: x > 0, num_per_cluster)))\n return np.mean(intra / num_per_cluster)\n" ]
[ [ "numpy.sum", "numpy.zeros", "torch.cuda.is_available", "torch.abs", "sklearn.cluster.SpectralClustering", "numpy.mean" ] ]
zCFD/zutil
[ "aeccc9b3fa337ad68b03d6d21b720b5c1d0d06b8" ]
[ "zutil/__init__.py" ]
[ "\"\"\"\nCopyright (c) 2012-2017, Zenotech Ltd\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Zenotech Ltd nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL ZENOTECH LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom past.builtins import execfile\nfrom builtins import zip\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport math\nimport sys\nfrom os import path\nimport numpy as np\nimport imp\n\n\ndef get_parameters_from_file(filename):\n conf = filename\n mymodule = __import__(conf)\n # Force a reload just in case it has already been loaded\n imp.reload(mymodule)\n return getattr(sys.modules[conf], \"parameters\")\n\n\ndef include(filename):\n \"\"\"\n include a file by executing it. 
This imports everything including\n variables into the calling module\n \"\"\"\n if path.exists(filename):\n exec(compile(open(filename, \"rb\").read(), filename, \"exec\"))\n\n\ndef get_zone_info(module_name):\n try:\n # mymodule = __import__(module_name)\n # Force a reload just in case it has already been loaded\n # reload(mymodule)\n # return mymodule\n import importlib\n\n return importlib.import_module(module_name)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return None\n\n\ndef get_default_zone_info():\n import inspect\n\n _, filename, linenumber, _, _, _ = inspect.stack()[1]\n return get_zone_info(path.split(path.splitext(filename)[0])[1] + \"_zone\")\n\n\ndef find_next_zone(parameters, zone_prefix):\n # Find next available\n found = False\n counter = 1\n while not found:\n key = zone_prefix + \"_\" + str(counter)\n if key in parameters:\n counter += 1\n else:\n found = True\n return key\n\n\ndef vector_from_wind_dir(wind_dir_degree, wind_speed=1.0):\n \"\"\"\n Return vector given a wind direction and wind speed\n Wind dir = 0.0 -> [0.0,-1.0,0.0]\n Wind dir = 90.0 -> [-1.0,0.0,0.0]\n\n Wind dir - Meteorological wind direction\n (direction from which wind is blowing)\n\n u -> Zone Velocity (Towards East)\n v -> Meridional Velocity (Towards North)\n\n \"\"\"\n\n return [\n -wind_speed * math.sin(math.radians(wind_dir_degree)),\n -wind_speed * math.cos(math.radians(wind_dir_degree)),\n 0.0,\n ]\n\n\ndef wind_direction(uvel, vvel):\n \"\"\"\n Calculate meteorological wind direction from velocity vector\n \"\"\"\n return math.degrees(math.atan2(-uvel, -vvel))\n\n\ndef vector_from_angle(alpha, beta, mag=1.0):\n \"\"\"\n Return vector given alpha and beta in degrees based on ESDU definition\n \"\"\"\n alpha = math.radians(alpha)\n beta = math.radians(beta)\n vec = [0.0, 0.0, 0.0]\n vec[0] = mag * math.cos(alpha) * math.cos(beta)\n vec[1] = mag * math.sin(beta)\n vec[2] = mag * math.sin(alpha) * math.cos(beta)\n return vec\n\n\ndef angle_from_vector(vec):\n \"\"\"\n Return vector given alpha and beta in degrees based on ESDU definition\n \"\"\"\n mag = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2])\n\n beta = math.asin(old_div(vec[1], mag))\n alpha = math.acos(old_div(vec[0], (mag * math.cos(beta))))\n alpha = math.degrees(alpha)\n beta = math.degrees(beta)\n return (alpha, beta)\n\n\ndef rotate_vector(vec, alpha_degree, beta_degree):\n \"\"\"\n Rotate vector by alpha and beta based on ESDU definition\n \"\"\"\n alpha = math.radians(alpha_degree)\n beta = math.radians(beta_degree)\n rot = [0.0, 0.0, 0.0]\n rot[0] = (\n math.cos(alpha) * math.cos(beta) * vec[0]\n + math.sin(beta) * vec[1]\n + math.sin(alpha) * math.cos(beta) * vec[2]\n )\n rot[1] = (\n -math.cos(alpha) * math.sin(beta) * vec[0]\n + math.cos(beta) * vec[1]\n - math.sin(alpha) * math.sin(beta) * vec[2]\n )\n rot[2] = -math.sin(alpha) * vec[0] + math.cos(alpha) * vec[2]\n return rot\n\n\ndef feet_to_meters(val):\n return val * 0.3048\n\n\ndef pressure_from_alt(alt):\n \"\"\"\n Calculate pressure in Pa from altitude in m using standard atmospheric tables\n \"\"\"\n return 101325.0 * math.pow((1.0 - 2.25577e-5 * alt), 5.25588)\n\n\ndef to_kelvin(rankine):\n return rankine * 0.555555555\n\n\n# def non_dim_time(dim_time):\n# speed = 0.2 * math.sqrt(1.4 * 287.0 * 277.77)\n# non_dim_speed = 0.2 * math.sqrt(0.2)\n# return dim_time * speed / non_dim_speed\n\n\ndef dot(vec1, vec2):\n return vec1[0] * vec2[0] + vec1[1] * vec2[1] + vec1[2] * vec2[2]\n\n\ndef mag(vec):\n return math.sqrt(dot(vec, 
vec))\n\n\ndef R_2vect(R, vector_orig, vector_fin):\n \"\"\"Calculate the rotation matrix required to rotate from one vector to another.\n\n For the rotation of one vector to another, there are an infinit series of rotation matrices\n possible. Due to axially symmetry, the rotation axis can be any vector lying in the symmetry\n plane between the two vectors. Hence the axis-angle convention will be used to construct the\n matrix with the rotation axis defined as the cross product of the two vectors. The rotation\n angle is the arccosine of the dot product of the two unit vectors.\n\n Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,\n the rotation matrix R is::\n\n | 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |\n R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |\n | -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |\n\n\n @param R: The 3x3 rotation matrix to update.\n @type R: 3x3 numpy array\n @param vector_orig: The unrotated vector defined in the reference frame.\n @type vector_orig: numpy array, len 3\n @param vector_fin: The rotated vector defined in the reference frame.\n @type vector_fin: numpy array, len 3\n \"\"\"\n # Python module imports.\n from math import acos, atan2, cos, pi, sin\n from numpy import array, cross, dot, float64, hypot, zeros\n from numpy.linalg import norm\n from random import gauss, uniform\n\n # Convert the vectors to unit vectors.\n vector_orig = old_div(vector_orig, norm(vector_orig))\n vector_fin = old_div(vector_fin, norm(vector_fin))\n\n # The rotation axis (normalised).\n axis = cross(vector_orig, vector_fin)\n axis_len = norm(axis)\n if axis_len != 0.0:\n axis = old_div(axis, axis_len)\n\n # Alias the axis coordinates.\n x = axis[0]\n y = axis[1]\n z = axis[2]\n\n # The rotation angle.\n angle = acos(dot(vector_orig, vector_fin))\n\n # Trig functions (only need to do this maths once!).\n ca = cos(angle)\n sa = sin(angle)\n\n # Calculate the rotation matrix elements.\n R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)\n R[0, 1] = -z * sa + (1.0 - ca) * x * y\n R[0, 2] = y * sa + (1.0 - ca) * x * z\n R[1, 0] = z * sa + (1.0 - ca) * x * y\n R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)\n R[1, 2] = -x * sa + (1.0 - ca) * y * z\n R[2, 0] = -y * sa + (1.0 - ca) * x * z\n R[2, 1] = x * sa + (1.0 - ca) * y * z\n R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)\n\n\ndef vector_vector_rotate(vec, axis, origin, theta):\n # Rotate vector\n temp = [0.0, 0.0, 0.0]\n\n temp[0] = (\n (\n origin[0] * (axis[1] * axis[1] + axis[2] * axis[2])\n - axis[0] * (origin[1] * axis[1] + origin[2] * axis[2] - dot(axis, vec))\n )\n * (1.0 - math.cos(theta))\n + vec[0] * math.cos(theta)\n + (\n -origin[2] * axis[1]\n + origin[1] * axis[2]\n - axis[2] * vec[1]\n + axis[1] * vec[2]\n )\n * math.sin(theta)\n )\n temp[1] = (\n (\n origin[1] * (axis[0] * axis[0] + axis[2] * axis[2])\n - axis[1] * (origin[0] * axis[0] + origin[2] * axis[2] - dot(axis, vec))\n )\n * (1.0 - math.cos(theta))\n + vec[1] * math.cos(theta)\n + (\n origin[2] * axis[0]\n - origin[0] * axis[2]\n + axis[2] * vec[0]\n - axis[0] * vec[2]\n )\n * math.sin(theta)\n )\n temp[2] = (\n (\n origin[2] * (axis[0] * axis[0] + axis[1] * axis[1])\n - axis[2] * (origin[0] * axis[0] + origin[1] * axis[1] - dot(axis, vec))\n )\n * (1.0 - math.cos(theta))\n + vec[2] * math.cos(theta)\n + (\n -origin[1] * axis[0]\n + origin[0] * axis[1]\n - axis[1] * vec[0]\n + axis[0] * vec[1]\n )\n * math.sin(theta)\n )\n\n return 
temp\n\n\ndef unit_vector(vector):\n return vector / np.linalg.norm(vector)\n\n\ndef angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n\ndef rotation_matrix(axis, theta):\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array(\n [\n [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],\n ]\n )\n\n\ndef rotate(coord, axis, ang):\n c2 = np.dot(rotation_matrix(axis, ang), (coord[0], coord[1], coord[2]))\n return [c2[0], c2[1], c2[2]]\n\n\ndef turbine_thrust_interpolate(u_inf, thrust_coef_curve):\n wsc = np.zeros((2, len(thrust_coef_curve)))\n i = 0\n for t in thrust_coef_curve:\n wsc[0][i] = t[0]\n wsc[1][i] = t[1]\n i += 1\n\n tc = np.interp(u_inf, wsc[0], wsc[1])\n return tc\n\n\ndef turbine_speed_interpolate(u_inf, tip_speed_curve):\n wsc = np.zeros((2, len(tip_speed_curve)))\n i = 0\n for t in tip_speed_curve:\n wsc[0][i] = t[0]\n wsc[1][i] = t[1]\n i += 1\n\n ts = np.interp(u_inf, wsc[0], wsc[1])\n return ts\n\n\n# area of polygon specified as counter-clockwise vertices (x,y) where last = first\n\n\ndef polygon_area(x, y):\n a = 0.0\n for i in range(len(x) - 1):\n a += 0.5 * (x[i + 1] + x[i]) * (y[i + 1] - y[i])\n return a\n\n\ndef trapezoid(x, y):\n a = 0\n for i in range(len(x) - 1):\n a += 0.5 * (y[i + 1] + y[i]) * (x[i + 1] - x[i])\n return a\n\n\n# Optimal power coefficient as a function of (a function of) tip speed ratio\n# \"A Compact, Closed-form Solution for the Optimum, Ideal Wind Turbine\" (Peters, 2012)\n\n\ndef glauert_peters(y):\n p1 = 16.0 * (1.0 - 2.0 * y) / (27.0 * (1.0 + y / 4.0))\n p2 = old_div(\n (math.log(2.0 * y) + (1.0 - 2.0 * y) + 0.5 * (1.0 - 2.0 * y) ** 2),\n ((1.0 - 2.0 * y) ** 3),\n )\n p3 = (\n 1.0\n + (457.0 / 1280.0) * y\n + (51.0 / 640.0) * y ** 2\n + y ** 3 / 160.0\n + 3.0 / 2.0 * y * p2\n )\n power_coeff = p1 * p3\n return power_coeff\n\n\n# Golden section search: Given f with a single local max in [a,b], gss returns interval [c,d] with d-c <= tol.\n\n\ndef gss(f, a, b, tol=1e-5):\n invphi = old_div((math.sqrt(5) - 1), 2) # 1/phi\n invphi2 = old_div((3 - math.sqrt(5)), 2) # 1/phi^2\n (a, b) = (min(a, b), max(a, b))\n h = b - a\n if h <= tol:\n return (a, b)\n n = int(math.ceil(old_div(math.log(old_div(tol, h)), math.log(invphi))))\n c = a + invphi2 * h\n d = a + invphi * h\n yc = f(c)[0]\n yd = f(d)[0]\n for k in range(n - 1):\n if yc > yd:\n b = d\n d = c\n yd = yc\n h = invphi * h\n c = a + invphi2 * h\n yc = f(c)[0]\n else:\n a = c\n c = d\n yc = yd\n h = invphi * h\n d = a + invphi * h\n yd = f(d)[0]\n if yc > yd:\n return (a, d)\n else:\n return (c, b)\n\n\ndef create_annulus(turbine_zone_dict):\n from mpi4py import MPI\n from numpy import zeros, array, dot, linalg, cross\n\n if \"verbose\" in turbine_zone_dict:\n verbose = turbine_zone_dict[\"verbose\"]\n else:\n verbose = False\n\n if \"number of segments\" in turbine_zone_dict:\n number_of_segments = turbine_zone_dict[\"number of segments\"]\n else:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"NO NUMBER OF SEGMENTS SPECIFIED - SETTING TO DEFAULT 12\")\n number_of_segments = 12\n\n if \"inner radius\" in turbine_zone_dict:\n ri = turbine_zone_dict[\"inner radius\"]\n else:\n if 
MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO INNER RADIUS SPECIFIED\")\n\n if \"outer radius\" in turbine_zone_dict:\n ro = turbine_zone_dict[\"outer radius\"]\n else:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO OUTER RADIUS SPECIFIED\")\n\n disc_centre = turbine_zone_dict[\"centre\"]\n disc_normal = turbine_zone_dict[\"normal\"]\n\n rotor_swept_area = (math.pi * ro * ro) - (math.pi * ri * ri)\n\n annulus = []\n dtheta = math.radians(360.0 / number_of_segments)\n theta = 0.0\n total_area = 0.0\n for i in range(number_of_segments):\n r = ri\n while r < ro:\n dr = dtheta * max(r,0.01*ro) / (1.0 - 0.5 * dtheta)\n max_r = r + dr\n if max_r > ro:\n dr = ro - r\n rp = r + 0.5 * dr\n da = dtheta * rp * dr\n disc_theta = i * dtheta + 0.5 * dtheta\n\n disc_pt = np.array(\n [rp * math.cos(disc_theta), rp * math.sin(disc_theta), 0.0]\n )\n\n # Rotate so that z points in the direction of the normal\n R = np.zeros((3, 3))\n vector_orig = np.array([0.0, 0.0, 1.0])\n vector_fin = np.zeros(3)\n for j in range(3):\n vector_fin[j] = disc_normal[j]\n R_2vect(R, vector_orig, vector_fin)\n\n disc_pt = dot(R, disc_pt)\n\n # translate to disc centre\n for j in range(3):\n disc_pt[j] += disc_centre[j]\n\n annulus.append(\n (r, dr, i * dtheta, dtheta, disc_pt[0], disc_pt[1], disc_pt[2])\n )\n total_area += da\n r = r + dr\n\n return annulus\n\n\ndef zone_default(dict, key, default_val, verbose=True):\n from mpi4py import MPI\n\n if key in dict:\n value = dict[key]\n else:\n value = default_val\n dict[key] = default_val\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n if \"name\" in dict:\n print(\n \"Turbine zone \"\n + str(dict[\"name\"])\n + \" missing: \"\n + str(key)\n + \" - setting to \"\n + str(default_val)\n )\n else:\n print(\n \"Turbine zone missing name and missing: \"\n + str(key)\n + \" - setting to \"\n + str(default_val)\n )\n return value\n\n\ndef calculate_aerofoil_section_area(tzd):\n upper = zone_default(\n tzd[\"aerofoil profile\"],\n \"upper surface\",\n [[0.0, 0.0], [0.5, 0.1], [1.0, 0.0]],\n True,\n )\n lower = zone_default(\n tzd[\"aerofoil profile\"],\n \"lower surface\",\n [[0.0, 0.0], [0.5, -0.1], [1.0, 0.0]],\n True,\n )\n x = np.concatenate((np.array(lower).T[0], np.array(upper).T[0][::-1][1:]))\n y = np.concatenate((np.array(lower).T[1], np.array(upper).T[1][::-1][1:]))\n aerofoil_section_area = polygon_area(x, y)\n tzd[\"aerofoil section area\"] = aerofoil_section_area\n return aerofoil_section_area\n\n\ndef calculate_rotor_moment(tzd):\n from mpi4py import MPI\n\n nblades = zone_default(tzd, \"number of blades\", 3, True)\n blade_material_density = zone_default(\n tzd, \"mean blade material density\", 200.0, True\n )\n if \"aerofoil section area\" in tzd:\n aerofoil_section_area = tzd[\"aerofoil section area\"]\n else:\n aerofoil_section_area = calculate_aerofoil_section_area(tzd)\n blade_chord = zone_default(tzd, \"blade chord\", [[0.0, 0.1], [1.0, 0.1]], True)\n ri = zone_default(tzd, \"inner radius\", 1.0, True)\n ro = zone_default(tzd, \"outer radius\", 30.0, True)\n rotor_moment = 0.0\n for r in np.linspace(ri, ro, 100):\n dr = (ro - ri) / 100.0\n c = (\n np.interp(r / ro, np.array(blade_chord).T[0], np.array(blade_chord).T[1])\n * ro\n )\n rotor_moment += r * r * c * c * dr\n rotor_moment = (\n rotor_moment * blade_material_density * aerofoil_section_area * nblades\n )\n tzd[\"rotor moment of inertia\"] = rotor_moment\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"rotor moment of inertia = \" + str(rotor_moment))\n return rotor_moment\n\n\ndef 
create_turbine_segments(\n turbine_zone_dict,\n v0,\n v1,\n v2,\n density,\n turbine_name_dict={},\n turbine_name=\"\",\n annulusVel=None,\n annulusTi=None,\n):\n from mpi4py import MPI\n\n verbose = zone_default(turbine_zone_dict, \"verbose\", True, False)\n number_of_segments = zone_default(\n turbine_zone_dict, \"number of segments\", 12, verbose\n )\n rotation_direction = zone_default(\n turbine_zone_dict, \"rotation direction\", \"clockwise\", verbose\n ) # when viewed from the front\n ri = zone_default(turbine_zone_dict, \"inner radius\", 1.0, verbose)\n ro = zone_default(turbine_zone_dict, \"outer radius\", 30.0, verbose)\n rotor_swept_area = math.pi * (ro * ro - ri * ri)\n disc_normal = zone_default(turbine_zone_dict, \"normal\", [1.0, 0.0, 0.0], verbose)\n disc_centre = zone_default(turbine_zone_dict, \"centre\", [0.0, 0.0, 0.0], verbose)\n up = zone_default(turbine_zone_dict, \"up\", [0.0, 0.0, 1.0], verbose)\n yaw = zone_default(turbine_zone_dict, \"yaw\", 0.0, verbose)\n auto_yaw = zone_default(turbine_zone_dict, \"auto yaw\", False, verbose)\n tilt = zone_default(turbine_zone_dict, \"tilt\", 0.0, verbose)\n inertia = zone_default(turbine_zone_dict, \"inertia\", False, verbose)\n model = zone_default(turbine_zone_dict, \"model\", \"simple\", verbose)\n status = zone_default(turbine_zone_dict, \"status\", \"on\", verbose)\n use_glauert_power = zone_default(\n turbine_zone_dict, \"use glauert power\", False, verbose\n )\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(model)\n induction = \"induction\" in model\n bet = \"blade element theory\" in model\n simple = \"simple\" in model or \"direct\" in model\n bet_prop = \"blade element propellor\" in model\n if not (induction or bet or simple or bet_prop):\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO MODEL SPECIFIED - DEFAULT TO SIMPLE MODEL\")\n simple = True\n\n annulus_metrics = create_annulus(turbine_zone_dict)\n global bet_kernel_calls\n\n if inertia:\n dt = zone_default(turbine_zone_dict, \"dt\", 0.1, verbose)\n if \"rotor moment of inertia\" in turbine_zone_dict:\n rotor_moment = turbine_zone_dict[\"rotor moment of inertia\"]\n else:\n rotor_moment = calculate_rotor_moment(turbine_zone_dict)\n\n if bet_prop:\n temp = np.reshape(annulusVel, (-1, 3)).T\n u_ref = math.sqrt(\n np.mean(temp[0]) ** 2 + np.mean(temp[1]) ** 2 + np.mean(temp[2]) ** 2\n )\n nblades = zone_default(turbine_zone_dict, \"number of blades\", 3, verbose)\n aerofoil_cl = zone_default(\n turbine_zone_dict, \"aerofoil cl\", [[-90.0, 0.0], [90.0, 0.0]], verbose\n )\n aerofoil_cd = zone_default(\n turbine_zone_dict, \"aerofoil cd\", [[-90.0, 1.0], [90.0, 1.0]], verbose\n )\n blade_chord = zone_default(\n turbine_zone_dict, \"blade chord\", [[0.0, 0.1], [1.0, 0.1]], verbose\n )\n blade_twist = zone_default(\n turbine_zone_dict, \"blade twist\", [[0.0, 25.0], [1.0, 0.0]], verbose\n ) # degrees\n omega = zone_default(turbine_zone_dict, \"omega\", 0.0, verbose)\n ts = omega * ro / u_ref\n tip_loss_correction = \"tip loss correction\" in turbine_zone_dict\n if tip_loss_correction:\n tip_loss_correction_model = zone_default(\n turbine_zone_dict, \"tip loss correction\", \"none\", verbose\n )\n tip_loss_correction_r = zone_default(\n turbine_zone_dict, \"tip loss correction radius\", 0.0, verbose\n )\n elif bet:\n bet_kernel_calls = 0\n temp = np.reshape(annulusVel, (-1, 3)).T\n u_ref = math.sqrt(\n np.mean(temp[0]) ** 2 + np.mean(temp[1]) ** 2 + np.mean(temp[2]) ** 2\n )\n nblades = zone_default(turbine_zone_dict, \"number of blades\", 3, verbose)\n 
aerofoil_cl = zone_default(\n turbine_zone_dict, \"aerofoil cl\", [[-90.0, 0.0], [90.0, 0.0]], verbose\n )\n aerofoil_cd = zone_default(\n turbine_zone_dict, \"aerofoil cd\", [[-90.0, 1.0], [90.0, 1.0]], verbose\n )\n blade_chord = zone_default(\n turbine_zone_dict, \"blade chord\", [[0.0, 0.1], [1.0, 0.1]], verbose\n )\n blade_twist = zone_default(\n turbine_zone_dict, \"blade twist\", [[0.0, 25.0], [1.0, 0.0]], verbose\n ) # degrees\n blade_pitch_range = zone_default(\n turbine_zone_dict, \"blade pitch range\", [-10.0, 10.0], verbose\n ) # degrees\n blade_pitch_step = zone_default(\n turbine_zone_dict, \"blade pitch step\", 1.0, verbose\n ) # degrees\n blade_pitch = zone_default(\n turbine_zone_dict, \"blade pitch\", 0.0, verbose\n ) # degrees\n blade_pitch_tol = zone_default(\n turbine_zone_dict, \"blade pitch tol\", 0.01, verbose\n ) # degrees\n dt = zone_default(turbine_zone_dict, \"dt\", 0.1, verbose) # seconds\n rated_power = zone_default(\n turbine_zone_dict, \"rated power\", 2.3e6, verbose\n ) # Watts\n # m/s environmental limit (2009)\n tip_speed_limit = zone_default(\n turbine_zone_dict, \"tip speed limit\", 80.0, verbose\n )\n # turbulence intensity range [0,1]\n damage_ti = zone_default(turbine_zone_dict, \"damage ti\", 0.15, verbose)\n damage_speed = zone_default(\n turbine_zone_dict, \"damage speed\", 10.0, verbose\n ) # m/s\n friction_loss = zone_default(\n turbine_zone_dict, \"friction loss\", 0.01, verbose\n ) # friction slow down\n cut_in_speed = zone_default(\n turbine_zone_dict, \"cut in speed\", 1.0, verbose\n ) # m/s\n cut_out_speed = zone_default(\n turbine_zone_dict, \"cut out speed\", 99.0, verbose\n ) # m/s\n thrust_factor = zone_default(turbine_zone_dict, \"thrust factor\", 1.0, verbose)\n omega = zone_default(turbine_zone_dict, \"omega\", 0.0, verbose)\n tip_loss_correction = \"tip loss correction\" in turbine_zone_dict\n if tip_loss_correction:\n tip_loss_correction_model = zone_default(\n turbine_zone_dict, \"tip loss correction\", \"none\", verbose\n )\n tip_loss_correction_r = zone_default(\n turbine_zone_dict, \"tip loss correction radius\", 0.0, verbose\n )\n if (u_ref < cut_in_speed) or (u_ref > cut_out_speed):\n omega = 0.0\n ts = omega * ro / u_ref\n if induction:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"CANNOT USE BLADE ELEMENT THEORY WITH INDUCTION MODEL\")\n induction = False\n else:\n power_model = zone_default(turbine_zone_dict, \"power model\", None, False)\n if power_model == \"glauert\":\n use_glauert_power = True\n u_ref = math.sqrt(v0 * v0 + v1 * v1 + v2 * v2)\n if \"thrust coefficient curve\" in turbine_zone_dict:\n tc = np.interp(\n u_ref,\n np.array(turbine_zone_dict[\"thrust coefficient curve\"]).T[0],\n np.array(turbine_zone_dict[\"thrust coefficient curve\"]).T[1],\n )\n elif \"thrust coefficient\" in turbine_zone_dict:\n tc = turbine_zone_dict[\"thrust coefficient\"]\n else:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO THRUST COEFFICIENT SPECIFIED\")\n\n if \"tip speed ratio curve\" in turbine_zone_dict:\n ts = np.interp(\n u_ref,\n np.array(turbine_zone_dict[\"tip speed ratio curve\"]).T[0],\n np.array(turbine_zone_dict[\"tip speed ratio curve\"]).T[1],\n )\n elif \"tip speed ratio\" in turbine_zone_dict:\n ts = turbine_zone_dict[\"tip speed ratio\"]\n else:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO TIP SPEED RATIO SPECIFIED\")\n omega = old_div(ts * u_ref, ro)\n\n if induction:\n u_infty = u_ref\n else:\n # Assuming 1D momentum theory and the Betz limit\n u_infty = (3.0 / 2.0) * u_ref\n\n betz_power = 0.5 * density 
* u_infty ** 3 * rotor_swept_area * (16.0 / 27.0)\n if use_glauert_power:\n if \"glauert power curve\" not in turbine_zone_dict:\n gp_curve = []\n ts_vals = np.arange(0.0, 20.0, 0.1)\n b_vals = np.arange(0.3334, 0.5, 0.0001)\n peters_lr_vals = []\n for b in b_vals:\n peters_lr_vals.append(\n old_div(\n math.sqrt(1.0 + b) * (1.0 - 2.0 * b), math.sqrt(3.0 * b - 1.0)\n )\n )\n for ts_val in ts_vals:\n b0 = np.interp(ts_val, peters_lr_vals[::-1], b_vals[::-1])\n y = 3.0 * b0 - 1.0\n gp = 0.5 * density * u_infty ** 3 * rotor_swept_area * glauert_peters(y)\n gp_curve.append([ts_val, gp])\n turbine_zone_dict[\"glauert power curve\"] = gp_curve\n glauert_power = np.interp(\n ts,\n np.array(turbine_zone_dict[\"glauert power curve\"]).T[0],\n np.array(turbine_zone_dict[\"glauert power curve\"]).T[1],\n )\n\n if verbose and (MPI.COMM_WORLD.Get_rank() == 0):\n print(\"tip speed ratio = \" + str(ts))\n print(\"rotational speed = \" + str(omega) + \" rad/s\")\n print(\"wind speed = \" + str(u_ref) + \" m/s\")\n print(\"rotor swept area = \" + str(rotor_swept_area) + \" m^2\")\n print(\"density = \" + str(density) + \" kg/m^3\")\n print(\"number of segments = \" + str(number_of_segments))\n\n if not bet_prop:\n\n def yaw_control(yaw, tilt, disc_normal, up, auto_yaw, annulusVel):\n if auto_yaw:\n temp = np.reshape(annulusVel, (-1, 3)).T\n u_normal = [-np.mean(temp[0]), -np.mean(temp[1]), -np.mean(temp[2])]\n ang = angle_between(u_normal, disc_normal)\n if np.degrees(ang) > 10.0:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\n \"Auto_yaw: geometric disc normal and local flow angle too large: \"\n + str(np.degrees(ang))\n )\n else:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\n \"Auto-yaw: set disc normal to disc-averaged velocity normal\"\n )\n yaw = math.degrees(angle_between(disc_normal, u_normal))\n disc_normal = u_normal\n else:\n disc_normal = rotate(disc_normal, up, math.radians(yaw))\n tilt_axis = np.cross(disc_normal, up)\n disc_normal = rotate(disc_normal, tilt_axis, math.radians(tilt))\n if np.dot(disc_normal, up) < 0.0 and MPI.COMM_WORLD.Get_rank() == 0:\n print(\"Tilting wrong way!\")\n return yaw, unit_vector(disc_normal)\n\n yaw, disc_normal = yaw_control(yaw, tilt, disc_normal, up, auto_yaw, annulusVel)\n if (MPI.COMM_WORLD.Get_rank() == 0) and verbose:\n print(\"disc_normal = \" + str(disc_normal))\n\n if bet_prop:\n annulus = []\n theta = 0.0\n total_area = 0.0\n total_thrust = 0.0\n total_torque = 0.0\n angular_induction = 0.0\n\n avindex = 0\n # annulus_metrics = (r, dr, i * dtheta, dtheta, disc_pt[0], disc_pt[1], disc_pt[2])\n for am in annulus_metrics:\n rp = am[0] + 0.5 * am[1]\n da = am[3] * rp * am[1]\n ulocal = np.reshape(annulusVel, (-1, 3))[avindex]\n rvec = unit_vector(\n [am[4] - disc_centre[0], am[5] - disc_centre[1], am[6] - disc_centre[2]]\n )\n if rotation_direction == \"clockwise\":\n local_omega_vec = np.cross(rvec, disc_normal)\n else:\n local_omega_vec = np.cross(disc_normal, rvec)\n v_n = -np.dot(ulocal, disc_normal)\n v_r = np.dot(ulocal, local_omega_vec)\n if (abs((rp * omega) - v_r)) > 0.0:\n theta_rel = math.atan(v_n / ((rp * omega) - v_r))\n else:\n theta_rel = math.pi / 2.0\n urel = math.sqrt((rp * omega - v_r) ** 2 + v_n ** 2)\n beta_twist = np.interp(\n old_div(rp, ro), np.array(blade_twist).T[0], np.array(blade_twist).T[1]\n )\n chord = (\n np.interp(\n old_div(rp, ro),\n np.array(blade_chord).T[0],\n np.array(blade_chord).T[1],\n )\n * ro\n )\n beta = math.radians(beta_twist)\n alpha = beta - theta_rel\n cl = np.interp(\n math.degrees(alpha),\n np.array(aerofoil_cl).T[0],\n 
np.array(aerofoil_cl).T[1],\n )\n cd = np.interp(\n math.degrees(alpha),\n np.array(aerofoil_cd).T[0],\n np.array(aerofoil_cd).T[1],\n )\n if tip_loss_correction:\n rstar = tip_loss_correction_r * ro\n if rp > rstar:\n tip_loss_factor = math.sqrt(\n 1.0 - ((rp - rstar) / (ro - rstar)) ** 2\n )\n cl = cl * tip_loss_factor\n cd = cd * tip_loss_factor\n f_L = cl * 0.5 * density * urel ** 2 * chord\n f_D = cd * 0.5 * density * urel ** 2 * chord\n F_L = old_div(nblades, (2.0 * math.pi * rp)) * f_L\n F_D = old_div(nblades, (2.0 * math.pi * rp)) * f_D\n dt = -(F_L * math.cos(theta_rel) - F_D * math.sin(theta_rel)) * da\n dq = -(F_L * math.sin(theta_rel) + F_D * math.cos(theta_rel)) * da\n if rotation_direction == \"anticlockwise\":\n dq = -dq\n annulus.append((dt, dq, am[0], am[1], am[2], am[3]))\n total_area += da\n total_thrust += dt\n total_torque += math.fabs(dq * rp)\n avindex = avindex + 1\n total_power = total_torque * omega\n\n elif bet:\n bet_kernel_calls = 0\n\n # pre-populate the beta_twist and chord values\n for i in range(len(annulus_metrics)):\n rp = annulus_metrics[i][0] + 0.5 * annulus_metrics[i][1]\n beta_twist = np.interp(\n (rp / ro), np.array(blade_twist).T[0], np.array(blade_twist).T[1]\n )\n chord = (\n np.interp(\n (rp / ro), np.array(blade_chord).T[0], np.array(blade_chord).T[1]\n )\n * ro\n )\n annulus_metrics[i] = annulus_metrics[i] + (beta_twist, chord)\n\n annulus = len(annulus_metrics) * [(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)]\n\n def bet_kernel(beta_pitch):\n global bet_kernel_calls\n bet_kernel_calls = bet_kernel_calls + 1\n total_area = 0.0\n total_thrust = 0.0\n total_torque = 0.0\n angular_induction = 0.0\n avindex = 0\n # check whether any segments are at negative angle of attack.\n alpha_positive = True\n # annulus_metrics = (r, dr, i * dtheta, dtheta, disc_pt[0], disc_pt[1], disc_pt[2], beta_twist, chord)\n for am in annulus_metrics:\n rp = am[0] + 0.5 * am[1]\n da = am[3] * rp * am[1]\n tilt_axis = np.cross(up, disc_normal)\n rvec0 = rotate(up, tilt_axis, math.radians(tilt))\n rvec = unit_vector(\n [\n am[4] - disc_centre[0],\n am[5] - disc_centre[1],\n am[6] - disc_centre[2],\n ]\n )\n ulocal = np.reshape(annulusVel, (-1, 3))[avindex]\n if rotation_direction == \"clockwise\":\n local_omega_vec = np.cross(rvec, disc_normal)\n else:\n local_omega_vec = np.cross(disc_normal, rvec)\n omega_air = np.dot(local_omega_vec, ulocal) / rp\n omega_rel = omega - omega_air\n u_ref_local = -np.dot(ulocal, disc_normal)\n urel = math.sqrt((rp * omega_rel) ** 2 + u_ref_local ** 2)\n if (rp * omega_rel) > 0.0:\n theta_rel = math.atan(old_div(u_ref_local, (rp * omega_rel)))\n else:\n theta_rel = math.pi / 2.0\n beta_twist = am[7]\n chord = am[8]\n beta = math.radians(beta_pitch + beta_twist)\n alpha = theta_rel - beta\n if alpha < 0.0:\n alpha_positive = False\n cl = np.interp(\n math.degrees(alpha),\n np.array(aerofoil_cl).T[0],\n np.array(aerofoil_cl).T[1],\n )\n if tip_loss_correction:\n if tip_loss_correction_model == \"elliptic\":\n tlc = math.sqrt(1.0 - (rp / ro) ** 2)\n elif tip_loss_correction_model == \"acos-fit\":\n tlc = (2.0 / math.pi) * math.acos(\n math.exp(-63.0 * (1.0 - (rp / ro) ** 2))\n )\n elif tip_loss_correction_model == \"acos shift-fit\":\n tlc = (2.0 / math.pi) * math.acos(\n math.exp(-48.0 * (1.0 - (rp / ro) ** 2) - 0.5)\n )\n elif tip_loss_correction_model == \"f-fit\":\n tlc = 1.0 - 2.5 * ((1.0 - (rp / ro) ** 2) ** 0.39) / (\n (2.0 - (rp / ro) ** 2) ** 64\n )\n else:\n tlc = 1.0\n cl = cl * tlc # only apply to lift, not drag.\n cd = np.interp(\n 
math.degrees(alpha),\n np.array(aerofoil_cd).T[0],\n np.array(aerofoil_cd).T[1],\n )\n f_L = cl * 0.5 * density * urel ** 2 * chord\n f_D = cd * 0.5 * density * urel ** 2 * chord\n F_L = old_div(nblades, (2.0 * math.pi * rp)) * f_L\n F_D = old_div(nblades, (2.0 * math.pi * rp)) * f_D\n dt = (\n (F_L * math.cos(theta_rel) + F_D * math.sin(theta_rel)) * da\n ) * thrust_factor\n dq = -(F_L * math.sin(theta_rel) - F_D * math.cos(theta_rel)) * da\n if rotation_direction == \"clockwise\":\n dq = -dq\n annulus[avindex] = (dt, dq, am[0], am[1], am[2], am[3])\n total_area += da\n total_thrust += dt\n total_torque += math.fabs(dq * rp)\n angular_induction += omega_air * da\n avindex = avindex + 1\n if not alpha_positive:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"WARNING - negative angle of attack \")\n angular_induction = angular_induction / total_area\n return total_torque, total_area, total_thrust, angular_induction, annulus\n\n def turbine_controller(\n omega,\n rated_power,\n tip_speed_limit,\n damage_ti,\n damage_speed,\n status,\n u_ref,\n cut_in_speed,\n cut_out_speed,\n blade_pitch,\n blade_pitch_step,\n ):\n if status == \"off\":\n omega = 0.0\n if (u_ref < cut_in_speed) or (u_ref > cut_out_speed):\n omega = 0.0\n # Make sure we are not exceeding the blade tip speed limit\n omega = min(omega, tip_speed_limit / ro) * (1.0 - friction_loss)\n # work out whether we are feathering the blades to shed power:\n blade_pitch_low = max(blade_pitch - blade_pitch_step, blade_pitch_range[0])\n blade_pitch_high = min(blade_pitch + blade_pitch_step, blade_pitch_range[1])\n blade_pitch_opt = np.min(\n gss(bet_kernel, blade_pitch_low, blade_pitch_high, blade_pitch_tol)\n )\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"Blade pitch opt = \" + str(blade_pitch_opt))\n maximum_torque = bet_kernel(blade_pitch_opt)[0]\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"Maximum torque = \" + str(maximum_torque))\n if maximum_torque * omega > rated_power:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"Feathering to reduce power below rated power\")\n # Construct a power curve against blade pitch for the current rate of rotation\n blade_pitch_curve = []\n for b in np.arange(blade_pitch_low, blade_pitch_opt, blade_pitch_tol):\n blade_pitch_curve.append([b, omega * bet_kernel(b)[0]])\n if (MPI.COMM_WORLD.Get_rank() == 0) and verbose:\n print(\n \"Points on blade pitch curve : \" + str(len(blade_pitch_curve))\n )\n if len(blade_pitch_curve) > 1:\n # Look up the blade pitch that recovers the rated power\n blade_pitch = np.interp(\n rated_power,\n np.array(blade_pitch_curve).T[1],\n np.array(blade_pitch_curve).T[0],\n )\n else:\n blade_pitch = blade_pitch_low\n if (MPI.COMM_WORLD.Get_rank() == 0) and verbose:\n print(\"Rated power blade pitch = : \" + str(blade_pitch))\n total_torque, total_area, total_thrust, angular_induction, annulus = bet_kernel(\n blade_pitch\n )\n torque_blades = 0.0\n else:\n # Use half of the available torque to accelerate the blades and half to provide power to the generator\n # unless this exceeds a 5% increase in the rate of rotation or the tip speed limit or the rated power.\n blade_pitch = blade_pitch_opt\n total_torque, total_area, total_thrust, angular_induction, annulus = bet_kernel(\n blade_pitch\n )\n torque_blades = total_torque / 2.0 # Completely arbitrary.\n # modfy the tip speed limit if there is an rpm ramp:\n if \"rpm ramp\" in turbine_zone_dict:\n rr = np.asarray(turbine_zone_dict[\"rpm ramp\"])\n if (u_ref > cut_in_speed) and (u_ref 
< rr[1][1]):\n tip_speed_limit = min(\n tip_speed_limit,\n ro\n * np.interp(u_ref, rr.T[0], rr.T[1])\n * 2.0\n * np.pi\n / 60.0,\n )\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\n \"RPM LIMIT: tip speed limit = \" + str(tip_speed_limit)\n )\n if rotor_moment > 0.0:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"BET - Rotor Moment Model\")\n torque_blades = min(\n torque_blades,\n ((tip_speed_limit / ro) - omega) * rotor_moment / dt,\n )\n omega = omega + (torque_blades * dt) / rotor_moment\n else:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"BET - USING ZERO INERTIA MODEL\")\n omega = omega * 1.1 # Limit the increase to 10%\n # Do not allow the rotor to over-speed\n omega = min(omega, tip_speed_limit / ro)\n torque_blades = 0.0\n # Do not exceed the (approximated) rated power\n torque_power = total_torque - torque_blades\n if torque_power > 0.0:\n omega = min(omega, rated_power / torque_power)\n # work out whether we are stowing the blades to prevent damage\n damage_alert = False\n if (MPI.COMM_WORLD.Get_rank() == 0) and verbose:\n print(\"Maximum onset TI: \" + str(np.max(annulusTi)))\n for aindex in range(len(np.reshape(annulusVel, (-1, 3)))):\n ulocal = np.reshape(annulusVel, (-1, 3))[aindex]\n ulocalmag = math.sqrt(ulocal[0] ** 2 + ulocal[1] ** 2 + ulocal[2] ** 2)\n tilocal = annulusTi[aindex]\n if (ulocalmag > damage_speed) and (tilocal > damage_ti):\n damage_alert = True\n if damage_alert:\n if MPI.COMM_WORLD.Get_rank() == 0 and verbose:\n print(\"Damage alert detected - stowing turbine\")\n if omega > 0.1:\n omega = omega * 0.9 # slow down the turbine\n else:\n omega = 0.0\n torque_blades = 0.0\n torque_power = total_torque - torque_blades\n return (\n blade_pitch,\n omega,\n torque_blades,\n torque_power,\n total_torque,\n total_area,\n total_thrust,\n angular_induction,\n annulus,\n )\n\n blade_pitch, omega, torque_blades, torque_power, total_torque, total_area, total_thrust, angular_induction, annulus = turbine_controller(\n omega,\n rated_power,\n tip_speed_limit,\n damage_ti,\n damage_speed,\n status,\n u_ref,\n cut_in_speed,\n cut_out_speed,\n blade_pitch,\n blade_pitch_step,\n )\n\n turbine_power = torque_power * math.fabs(omega)\n total_power = total_torque * omega\n\n turbine_zone_dict[\"omega\"] = omega\n turbine_zone_dict[\"blade pitch\"] = blade_pitch\n turbine_zone_dict[\"yaw\"] = yaw\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n turbine_name_dict[turbine_name + \"_tilt\"] = tilt\n turbine_name_dict[turbine_name + \"_yaw\"] = yaw\n turbine_name_dict[turbine_name + \"_blade_pitch\"] = blade_pitch\n turbine_name_dict[turbine_name + \"_ang_ind\"] = angular_induction / omega\n turbine_name_dict[turbine_name + \"_thrust\"] = total_thrust\n\n if verbose:\n print(\"status = \" + str(status))\n print(\"rotation rate = \" + str(omega) + \" radians / sec\")\n print(\"blade pitch = \" + str(blade_pitch) + \" degrees\")\n print(\"torque power = \" + str(torque_power) + \" Joules/rad\")\n print(\"torque blades = \" + str(torque_blades) + \" Joules/rad\")\n print(\n \"angular induction = \"\n + str(100.0 * angular_induction / omega)\n + \"%\"\n )\n print(\"bet kernel calls = \" + str(bet_kernel_calls))\n\n # TODO - add turbulence source in wake of turbine.\n # TODO - add restart capability (read data from CSV report file)\n\n else:\n if power_model == \"betz\":\n power = betz_power\n elif power_model == \"glauert\":\n power = glauert_power\n elif \"turbine power curve\" in turbine_zone_dict:\n power = np.interp(\n u_ref,\n np.array(turbine_zone_dict[\"turbine 
power curve\"]).T[0],\n np.array(turbine_zone_dict[\"turbine power curve\"]).T[1],\n )\n elif \"turbine power\" in turbine_zone_dict:\n power = turbine_zone_dict[\"turbine power\"]\n else:\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"NO POWER MODEL SPECIFIED - USING BETZ LIMIT\")\n power = betz_power\n annulus = []\n # Induction assumes that u_ref is u_inf. Direct (Simple) assumes that u_ref is at disk.\n if induction:\n # Momentum theory: Ct = 4 * a * ( 1 - a), Cp = 4 * a * ( 1 - a)^2, Betz Optimal rotor: a = 1/3\n if tc > 0.999:\n print(\"INDUCTION MODEL TC CANNOT EXCEED 1.0: \" + str(tc))\n ind_fac = old_div(\n (4.0 - math.sqrt(4.0 * 4.0 - 4.0 * 4.0 * tc)), (2.0 * 4.0)\n )\n if verbose and (MPI.COMM_WORLD.Get_rank() == 0):\n print(\"Induction factor: \", str(ind_fac))\n dtheta = math.radians(360.0 / number_of_segments)\n target_torque = old_div(power, omega)\n theta = 0.0\n total_area = 0.0\n total_thrust = 0.0\n total_torque = 0.0\n for i in range(number_of_segments):\n r = ri\n while r < ro:\n dr = old_div(dtheta * max(r,0.01*ro), (1.0 - 0.5 * dtheta))\n max_r = r + dr\n if max_r > ro:\n dr = ro - r\n rp = r + 0.5 * dr\n da = dtheta * rp * dr\n if induction:\n dt = (\n 0.5\n * density\n * u_ref\n * u_ref\n * da\n * 4.0\n * ind_fac\n * (1.0 - ind_fac)\n )\n lambda_r = old_div(rp * omega, u_ref)\n if lambda_r > 0.0:\n ang_ind_fac = -0.5 + math.sqrt(\n 0.25 + old_div(ind_fac * (1.0 - ind_fac), lambda_r ** 2)\n )\n else:\n ang_ind_fac = 0.0\n dq = (\n 4.0\n * ang_ind_fac\n * (1.0 - ind_fac)\n * 0.5\n * density\n * u_ref\n * omega\n * rp\n * rp\n * da\n / rp\n )\n else:\n dt = 0.5 * density * u_ref * u_ref * da * tc\n dq = old_div((target_torque * da), (rotor_swept_area * rp))\n if rotation_direction == \"anticlockwise\":\n dq = -dq\n annulus.append((dt, dq, r, dr, i * dtheta, dtheta))\n total_area += da\n total_thrust += dt\n total_torque += math.fabs(dq * rp)\n r = r + dr\n specified_thrust = 0.5 * rotor_swept_area * density * u_ref * u_ref * tc\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n if verbose:\n print(\"thrust coefficient [specified] = \" + str(tc))\n print(\"thrust [specified] = \" + str(specified_thrust))\n print(\"model specified power = \" + str(power) + \" Watts\")\n print(\"target torque = \" + str(target_torque) + \" Joules/rad\")\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n total_power = total_torque * omega\n turbine_name_dict[turbine_name + \"_power\"] = total_power\n turbine_name_dict[turbine_name + \"_uref\"] = u_ref\n turbine_name_dict[turbine_name + \"_omega\"] = omega\n turbine_name_dict[turbine_name + \"_thrust\"] = total_thrust\n\n if verbose:\n print(\"total area = \" + str(total_area) + \" m^2\")\n print(\"turbine power = \" + str(total_power) + \" Watts\")\n print(\"total thrust = \" + str(total_thrust) + \" Newtons\")\n print(\"total torque = \" + str(total_torque) + \" Joules/rad\")\n if not bet_prop:\n print(\n \"% of Betz limit power \"\n + str(old_div(100.0 * total_power, betz_power))\n + \"%\"\n )\n if use_glauert_power:\n print(\n \"% of Glauert optimal power \"\n + str(old_div(100.0 * total_power, glauert_power))\n + \"%\"\n )\n return annulus\n\n\ndef project_to_plane(pt, plane_point, plane_normal):\n from numpy import dot\n\n # print pt,plane_point,plane_normal\n return pt - dot(pt - plane_point, plane_normal) * plane_normal\n\n\n# def clockwise_angle(up_vector, pt_vector, plane_normal):\n# from numpy import zeros, array, dot, linalg, cross\n#\n# v_dot = dot(up_vector, pt_vector)\n# v_det = dot(plane_normal, cross(up_vector, pt_vector))\n#\n# r = 
math.atan2(v_det, v_dot)\n#\n# if r < 0:\n# r += 2.0 * math.pi\n#\n# return r\n\n\ndef convolution(\n disc,\n disc_centre,\n disc_radius,\n disc_normal,\n disc_up,\n cell_centre_list,\n cell_volume_list,\n):\n from mpi4py import MPI\n import libconvolution as cv\n from numpy import zeros, array, dot, linalg, cross, asarray, ndarray\n\n cell_centre_list_np = asarray(cell_centre_list)\n cell_volume_list_np = asarray(cell_volume_list)\n kernel_3d = False\n\n weighted_sum = np.zeros(len(disc))\n\n weighted_sum = cv.convolution_2dkernel_weights(\n disc,\n disc_centre,\n disc_radius,\n disc_normal,\n disc_up,\n cell_centre_list_np,\n cell_volume_list_np,\n )\n # Need to reduce weighted sum over all processes\n totals = np.zeros_like(weighted_sum)\n\n MPI.COMM_WORLD.Allreduce(weighted_sum, totals, op=MPI.SUM)\n weighted_sum = totals\n\n thrust_check_total = 0\n\n cell_force = np.zeros(len(cell_centre_list_np) * 3)\n thrust_check = cv.convolution_2dkernel_force(\n disc,\n disc_centre,\n disc_radius,\n disc_normal,\n disc_up,\n cell_centre_list_np,\n cell_volume_list_np,\n weighted_sum,\n cell_force,\n )\n\n thrust_check_array = np.array([thrust_check])\n thrust_check_total_array = np.array([0.0])\n\n MPI.COMM_WORLD.Allreduce(thrust_check_array, thrust_check_total_array, op=MPI.SUM)\n thrust_check_total = thrust_check_total_array[0]\n thrust_check = thrust_check_total\n\n # if MPI.COMM_WORLD.Get_rank() == 0:\n # print 'Convolved total thrust: ',thrust_check\n\n # thrust_check = 0.0\n total_thrust = 0.0\n for idx, w in enumerate(weighted_sum):\n segment = disc[idx]\n # if w > 0.0:\n # thrust_check += segment[0]\n total_thrust += segment[0]\n\n # if MPI.COMM_WORLD.Get_rank() == 0:\n # print 'Specified total thrust: ',total_thrust\n\n # Broken: Cell_force_scaled will have a different struct to cell_force\n if thrust_check > 0.0:\n thrust_factor = old_div(total_thrust, thrust_check)\n # if MPI.COMM_WORLD.Get_rank() == 0:\n # print 'Scaling thrust: ', thrust_factor\n cell_force_scaled = []\n for cell in range(old_div(len(cell_force), 3)):\n cell_force_scaled.append(\n (\n cell_force[cell * 3 + 0] * thrust_factor,\n cell_force[cell * 3 + 1] * thrust_factor,\n cell_force[cell * 3 + 2] * thrust_factor,\n )\n )\n return cell_force_scaled\n else:\n cell_force = ndarray.tolist(cell_force)\n cell_array = iter(cell_force)\n return list(zip(cell_array, cell_array, cell_array))\n\n cell_force = ndarray.tolist(cell_force)\n cell_array = iter(cell_force)\n return list(zip(cell_array, cell_array, cell_array))\n\n\ndef convolution2(\n disc,\n disc_centre,\n disc_radius,\n disc_normal,\n disc_up,\n cell_centre_list,\n cell_volume_list,\n):\n from mpi4py import MPI\n\n from numpy import zeros, array, dot, linalg, cross\n\n # from zutil import R_2vect\n\n cell_force = []\n\n thrust_check = 0.0\n\n # Transform disc points to actual location\n disc_pt_list = []\n for segment in disc:\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n disc_r = r + 0.5 * dr\n disc_theta = theta + 0.5 * dtheta\n\n disc_pt = array(\n [disc_r * math.cos(disc_theta), disc_r * math.sin(disc_theta), 0.0]\n )\n # print disc_pt\n # Rotate so that z points in the direction of the normal\n R = zeros((3, 3))\n vector_orig = array([0.0, 0.0, 1.0])\n vector_fin = zeros(3)\n for i in range(3):\n vector_fin[i] = disc_normal[i]\n R_2vect(R, vector_orig, vector_fin)\n\n disc_pt = dot(R, disc_pt)\n\n # translate to disc centre\n for i in range(3):\n disc_pt[i] += disc_centre[i]\n\n disc_pt_list.append(disc_pt)\n\n kernel_3d = 
False\n\n weighted_sum = np.zeros(len(disc))\n\n for cell_idx, cell_centre in enumerate(cell_centre_list):\n\n cell_dt = [0.0, 0.0, 0.0]\n cell_dq = [0.0, 0.0, 0.0]\n\n if kernel_3d:\n for idx, segment in enumerate(disc):\n dt = segment[0]\n dq = segment[1]\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n disc_r = r + 0.5 * dr\n disc_theta = theta + 0.5 * dtheta\n\n area = dtheta * disc_r * dr\n\n dt_per_area = old_div(dt, area)\n dq_per_area = old_div(dq, area)\n\n disc_pt = disc_pt_list[idx]\n # print disc_pt\n\n distance_vec = array(cell_centre) - disc_pt\n distance = math.sqrt(dot(distance_vec, distance_vec))\n unit_distance_vec = old_div(distance_vec, distance)\n\n epsilon = 2.0 * dr\n\n # 3D kernel\n eta = (\n 1.0\n / ((epsilon ** 2) * math.sqrt(math.pi) ** 3)\n * math.exp(-(old_div(distance, epsilon)) ** 2)\n )\n # /math.fabs(dot(disc_normal,unit_distance_vec))\n\n weighted_sum[idx] += max(1.0e-16, eta * cell_volume_list[cell_idx])\n\n # 1D kernel\n # eta = 1.0/(epsilon*math.sqrt(math.pi)) * math.exp(-(distance/epsilon)**2)\n\n # print eta,cell_centre,disc_pt\n\n # Set thrust force\n # for i in range(3):\n # cell_dt[i] += dt_per_area*disc_normal[i]*eta\n\n # Set torque force\n # Vector for centre to pt\n # disc_vec = disc_pt - array(disc_centre)\n # unit_disc_vec = disc_vec/linalg.norm(disc_vec)\n # torque_vector = cross(unit_disc_vec,disc_normal)\n\n # for i in range(3):\n # cell_dq[i] += dq_per_area*torque_vector[i]*eta\n else:\n # Need for find nearest segment\n\n plane_pt = project_to_plane(\n array(cell_centre), array(disc_centre), array(disc_normal)\n )\n\n # print 'Plane pt: ' + str(plane_pt)\n\n plane_pt_radius = linalg.norm(plane_pt - disc_centre)\n plane_pt_theta = 0.0\n if plane_pt_radius > 0.0:\n # plane_pt_theta = math.acos(dot([0.0,0.0,1.0],(plane_pt-disc_centre)/plane_pt_radius))\n plane_pt_theta = clockwise_angle(\n disc_up,\n old_div((plane_pt - disc_centre), plane_pt_radius),\n array(disc_normal),\n )\n\n min_index = -1\n for idx, segment in enumerate(disc):\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n if plane_pt_theta >= theta and plane_pt_theta <= theta + dtheta:\n if plane_pt_radius >= r and plane_pt_radius <= r + dr:\n min_index = idx\n break\n\n if min_index != -1:\n segment = disc[min_index]\n\n dt = segment[0]\n dq = segment[1]\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n disc_r = r + 0.5 * dr\n disc_theta = theta + 0.5 * dtheta\n\n area = dtheta * disc_r * dr\n\n dt_per_area = old_div(dt, area)\n dq_per_area = old_div(dq, area)\n\n distance_vec = array(cell_centre) - plane_pt\n distance = math.sqrt(dot(distance_vec, distance_vec))\n\n # epsilon = 2.0*dr\n epsilon = 0.2 * disc_radius\n\n # 1D kernel\n eta = (\n 1.0\n / (epsilon * math.sqrt(math.pi))\n * math.exp(-(old_div(distance, epsilon)) ** 2)\n )\n\n # Add max as eta may be zero due to underflow in the exponent\n # term\n weighted_sum[min_index] += max(\n 1.0e-16, eta * cell_volume_list[cell_idx]\n )\n\n # Need to reduce weighted sum over all processes\n totals = np.zeros_like(weighted_sum)\n\n MPI.COMM_WORLD.Allreduce(weighted_sum, totals, op=MPI.SUM)\n weighted_sum = totals\n\n for cell_idx, cell_centre in enumerate(cell_centre_list):\n\n cell_dt = [0.0, 0.0, 0.0]\n cell_dq = [0.0, 0.0, 0.0]\n\n if kernel_3d:\n for idx, segment in enumerate(disc):\n dt = segment[0]\n dq = segment[1]\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n disc_r = r + 0.5 * dr\n 
disc_theta = theta + 0.5 * dtheta\n\n area = dtheta * disc_r * dr\n\n dt_per_area = old_div(dt, area)\n dq_per_area = old_div(dq, area)\n\n disc_pt = disc_pt_list[idx]\n # print disc_pt\n\n distance_vec = array(cell_centre) - disc_pt\n distance = math.sqrt(dot(distance_vec, distance_vec))\n unit_distance_vec = old_div(distance_vec, distance)\n\n epsilon = 2.0 * dr\n\n # 3D kernel\n eta = (\n 1.0\n / ((epsilon ** 2) * math.sqrt(math.pi) ** 3)\n * math.exp(-(old_div(distance, epsilon)) ** 2)\n )\n # /math.fabs(dot(disc_normal,unit_distance_vec))\n\n redistribution_weight = weighted_sum[idx]\n\n # 1D kernel\n # eta = 1.0/(epsilon*math.sqrt(math.pi)) * math.exp(-(distance/epsilon)**2)\n\n # print eta,cell_centre,disc_pt\n\n # Set thrust force\n for i in range(3):\n cell_dt[i] += (\n old_div(dt_per_area * area, redistribution_weight)\n * disc_normal[i]\n * eta\n )\n\n # Set torque force\n # Vector for centre to pt\n disc_vec = disc_pt - array(disc_centre)\n disc_vec_mag = linalg.norm(disc_vec)\n unit_disc_vec = old_div(disc_vec, disc_vec_mag)\n torque_vector = cross(disc_normal, unit_disc_vec)\n\n # Note converting torque to a force\n for i in range(3):\n cell_dq[i] += old_div(\n dq_per_area * area, redistribution_weight\n ) * old_div(torque_vector[i] * eta, disc_vec_mag)\n else:\n # Need to find nearest segment\n\n plane_pt = project_to_plane(\n array(cell_centre), array(disc_centre), array(disc_normal)\n )\n\n # print 'Plane pt: ' + str(plane_pt)\n\n plane_pt_radius = linalg.norm(plane_pt - disc_centre)\n\n plane_pt_theta = 0.0\n if plane_pt_radius > 0.0:\n # plane_pt_theta = math.acos(dot([0.0,0.0,1.0],(plane_pt-disc_centre)/plane_pt_radius))\n plane_pt_theta = clockwise_angle(\n disc_up,\n old_div((plane_pt - disc_centre), plane_pt_radius),\n array(disc_normal),\n )\n\n min_index = -1\n for idx, segment in enumerate(disc):\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n if plane_pt_theta >= theta and plane_pt_theta <= theta + dtheta:\n if plane_pt_radius >= r and plane_pt_radius <= r + dr:\n min_index = idx\n break\n\n if min_index != -1:\n segment = disc[min_index]\n\n dt = segment[0]\n dq = segment[1]\n r = segment[2]\n dr = segment[3]\n theta = segment[4]\n dtheta = segment[5]\n\n disc_r = r + 0.5 * dr\n disc_theta = theta + 0.5 * dtheta\n\n area = dtheta * disc_r * dr\n\n dt_per_area = old_div(dt, area)\n dq_per_area = old_div(dq, area)\n\n distance_vec = array(cell_centre) - plane_pt\n distance = math.sqrt(dot(distance_vec, distance_vec))\n\n # epsilon = 2.0*dr\n epsilon = 0.2 * disc_radius\n\n # 1D kernel\n eta = (\n 1.0\n / (epsilon * math.sqrt(math.pi))\n * math.exp(-(old_div(distance, epsilon)) ** 2)\n )\n\n redistribution_weight = weighted_sum[min_index]\n\n # print redistribution_weight\n\n # print dt,eta,cell_centre,plane_pt\n\n # Set thrust force\n for i in range(3):\n cell_dt[i] += (\n old_div(dt_per_area * area, redistribution_weight)\n * disc_normal[i]\n * eta\n )\n\n # Set torque force\n # Vector for centre to pt\n disc_vec = plane_pt - array(disc_centre)\n disc_vec_mag = linalg.norm(disc_vec)\n unit_disc_vec = old_div(disc_vec, disc_vec_mag)\n torque_vector = cross(disc_normal, unit_disc_vec)\n\n # Note converting torque to force\n for i in range(3):\n cell_dq[i] += old_div(\n dq_per_area * area, redistribution_weight\n ) * old_div(torque_vector[i] * eta, disc_vec_mag)\n\n cell_force.append(\n (cell_dt[0] + cell_dq[0], cell_dt[1] + cell_dq[1], cell_dt[2] + cell_dq[2])\n )\n\n thrust_check += dot(cell_force[-1], disc_normal) * 
cell_volume_list[cell_idx]\n\n thrust_check_total = 0\n\n thrust_check_array = np.array([thrust_check])\n thrust_check_total_array = np.array([0.0])\n MPI.COMM_WORLD.Allreduce(thrust_check_array, thrust_check_total_array, op=MPI.SUM)\n thrust_check_total = thrust_check_total_array[0]\n thrust_check = thrust_check_total\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"Convolved total thrust: \", thrust_check)\n\n # thrust_check = 0.0\n total_thrust = 0.0\n for idx, w in enumerate(weighted_sum):\n segment = disc[idx]\n # if w > 0.0:\n # thrust_check += segment[0]\n total_thrust += segment[0]\n\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"Specified total thrust: \", total_thrust)\n\n if thrust_check > 0.0:\n thrust_factor = old_div(total_thrust, thrust_check)\n if MPI.COMM_WORLD.Get_rank() == 0:\n print(\"Scaling thrust: \", thrust_factor)\n cell_force_scaled = []\n for cell in cell_force:\n force = cell\n cell_force_scaled.append(\n (\n force[0] * thrust_factor,\n force[1] * thrust_factor,\n force[2] * thrust_factor,\n )\n )\n\n return cell_force_scaled\n else:\n return cell_force\n\n\ndef test_convolution():\n a = create_turbine_segments(0.7, 0.1, 1.0, 6.0, 1, 1)\n b = convolution(a, (0, 0, 0), (1, 0, 0), [(0.0, 0.0, 1.5)], [1.0])\n b = convolution(a, (0, 0, 0), (1, 0, 0), [(0.0, 0.0, 0.99)], [1.0])\n b = convolution(a, (0, 0, 0), (1, 0, 0), [(2.0, 0.0, 0.5)], [1.0])\n\n print(b)\n" ]
[ [ "numpy.degrees", "numpy.zeros_like", "numpy.interp", "numpy.dot", "numpy.zeros", "numpy.reshape", "numpy.cross", "numpy.asarray", "numpy.ndarray.tolist", "numpy.arange", "numpy.max", "numpy.array", "numpy.linspace", "numpy.linalg.norm", "numpy.mean" ] ]
allen91wu/Cirq
[ "c33bd9bd6d08650f41b0db5cf69abb3daed72a8f" ]
[ "cirq-core/cirq/optimizers/cphase_to_fsim.py" ]
[ "# pylint: disable=wrong-or-nonexistent-copyright-notice\nfrom typing import Optional, Sequence, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import devices, ops, protocols\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef _asinsin(x: float) -> float:\n \"\"\"Computes arcsin(sin(x)) for any x. Return value in [-π/2, π/2].\"\"\"\n k = round(x / np.pi)\n if k % 2 == 0:\n return x - k * np.pi\n return k * np.pi - x\n\n\ndef compute_cphase_exponents_for_fsim_decomposition(\n fsim_gate: 'cirq.FSimGate',\n) -> Sequence[Tuple[float, float]]:\n \"\"\"Returns intervals of CZPowGate exponents valid for FSim decomposition.\n\n Ideal intervals associated with the constraints are closed, but due to\n numerical error the caller should not assume the endpoints themselves\n are valid for the decomposition. See `decompose_cphase_into_two_fsim`\n for details on how FSimGate parameters constrain the phase angle of\n CZPowGate.\n\n Args:\n fsim_gate: FSimGate into which CZPowGate would be decomposed.\n\n Returns:\n Sequence of 2-tuples each consisting of the minimum and maximum\n value of the exponent for which CZPowGate can be decomposed into\n two FSimGates. The intervals are cropped to [0, 2]. The function\n returns zero, one or two intervals.\n \"\"\"\n\n def nonempty_intervals(\n intervals: Sequence[Tuple[float, float]]\n ) -> Sequence[Tuple[float, float]]:\n return tuple((a, b) for a, b in intervals if a < b)\n\n # Each of the two FSimGate parameters sets a bound on phase angle.\n bound1 = abs(_asinsin(fsim_gate.theta))\n bound2 = abs(_asinsin(fsim_gate.phi / 2))\n\n # First potential interval corresponds to the left side of sine's \"hump\".\n min_exponent_1 = 4 * min(bound1, bound2) / np.pi\n max_exponent_1 = 4 * max(bound1, bound2) / np.pi\n assert min_exponent_1 <= max_exponent_1\n\n # Second potential interval corresponds to the right side of sine's \"hump\".\n min_exponent_2 = 2 - max_exponent_1\n max_exponent_2 = 2 - min_exponent_1\n assert min_exponent_2 <= max_exponent_2\n\n # Intervals are disjoint. Return both.\n if max_exponent_1 < min_exponent_2:\n return nonempty_intervals(\n [(min_exponent_1, max_exponent_1), (min_exponent_2, max_exponent_2)]\n )\n if max_exponent_2 < min_exponent_1:\n return nonempty_intervals(\n [(min_exponent_2, max_exponent_2), (min_exponent_1, max_exponent_1)]\n )\n\n # Intervals overlap. Merge.\n min_exponent = min(min_exponent_1, min_exponent_2)\n max_exponent = max(max_exponent_1, max_exponent_2)\n return nonempty_intervals([(min_exponent, max_exponent)])\n\n\ndef decompose_cphase_into_two_fsim(\n cphase_gate: 'cirq.CZPowGate',\n *,\n fsim_gate: 'cirq.FSimGate',\n qubits: Optional[Sequence['cirq.Qid']] = None,\n atol: float = 1e-8,\n) -> 'cirq.OP_TREE':\n \"\"\"Decomposes CZPowGate into two FSimGates.\n\n This function implements the decomposition described in section VII F I\n of https://arxiv.org/abs/1910.11333.\n\n The decomposition results in exactly two FSimGates and a few single-qubit\n rotations. It is feasible if and only if one of the following conditions\n is met:\n\n |sin(θ)| <= |sin(δ/4)| <= |sin(φ/2)|\n |sin(φ/2)| <= |sin(δ/4)| <= |sin(θ)|\n\n where:\n\n θ = fsim_gate.theta,\n φ = fsim_gate.phi,\n δ = -π * cphase_gate.exponent.\n\n Note that the gate parameterizations are non-injective. For the\n decomposition to be feasible it is sufficient that one of the\n parameter values which correspond to the provided gate satisfies\n the constraints. 
This function will find and use the appropriate\n value whenever it exists.\n\n The constraints above imply that certain FSimGates are not suitable\n for use in this decomposition regardless of the target CZPowGate. We\n reject such gates based on how close |sin(θ)| is to |sin(φ/2)|, see\n atol argument below.\n\n This implementation accounts for the global phase.\n\n Args:\n cphase_gate: The CZPowGate to synthesize.\n fsim_gate: The only two qubit gate that is permitted to appear in the\n output.\n qubits: The qubits to apply the resulting operations to. If not set,\n defaults `cirq.LineQubit.range(2)`.\n atol: Tolerance used to determine whether fsim_gate is valid. The gate\n is invalid if the squares of the sines of the theta angle and half\n the phi angle are too close.\n\n Returns:\n Operations equivalent to cphase_gate and consisting solely of two copies\n of fsim_gate and a few single-qubit rotations.\n\n Raises:\n ValueError: Under any of the following circumstances:\n * cphase_gate or fsim_gate is parametrized,\n * cphase_gate and fsim_gate do not satisfy the conditions above,\n * fsim_gate has invalid angles (see atol argument above),\n * incorrect number of qubits are provided.\n \"\"\"\n if protocols.is_parameterized(cphase_gate):\n raise ValueError('Cannot decompose a parametrized gate.')\n if protocols.is_parameterized(fsim_gate):\n raise ValueError('Cannot decompose into a parametrized gate.')\n if qubits is None:\n qubits = devices.LineQubit.range(2)\n if len(qubits) != 2:\n raise ValueError(f'Expected a pair of qubits, but got {qubits!r}.')\n q0, q1 = qubits\n\n theta = fsim_gate.theta\n phi = fsim_gate.phi\n\n sin_half_phi = np.sin(phi / 2)\n cos_half_phi = np.cos(phi / 2)\n sin_theta = np.sin(theta)\n cos_theta = np.cos(theta)\n\n #\n # Step 1: find alpha\n #\n denominator = (sin_theta - sin_half_phi) * (sin_theta + sin_half_phi)\n if abs(denominator) < atol:\n raise ValueError(\n f'{fsim_gate} cannot be used to decompose CZPowGate because '\n 'sin(theta)**2 is too close to sin(phi/2)**2 '\n f'(difference is {denominator}).'\n )\n\n # Parametrization of CZPowGate by a real angle is a non-injective function\n # with the preimage of cphase_gate infinite. However, it is sufficient to\n # check just two of the angles against the constraints of the decomposition.\n canonical_delta = -np.pi * (cphase_gate.exponent % 2)\n for delta in (canonical_delta, canonical_delta + 2 * np.pi):\n sin_quarter_delta = np.sin(delta / 4)\n numerator = (sin_quarter_delta - sin_half_phi) * (sin_quarter_delta + sin_half_phi)\n sin_alpha_squared = numerator / denominator\n if 0 <= sin_alpha_squared <= 1:\n break\n else:\n intervals = compute_cphase_exponents_for_fsim_decomposition(fsim_gate)\n raise ValueError(\n f'{cphase_gate} cannot be decomposed into two {fsim_gate}. 
Valid '\n f'intervals for canonical exponent of CZPowGate: {intervals}.'\n )\n assert 0 <= sin_alpha_squared <= 1\n alpha = np.arcsin(np.sqrt(sin_alpha_squared))\n\n #\n # Step 2: find xi and eta\n #\n tan_alpha = np.tan(alpha)\n xi = np.arctan2(tan_alpha * cos_theta, cos_half_phi)\n eta = np.arctan2(tan_alpha * sin_theta, sin_half_phi)\n if delta < 0:\n eta += np.pi\n\n #\n # Step 3: synthesize output circuit\n #\n return (\n # Local X rotations to convert Γ1⊗I − iZ⊗Γ2 into exp(-i Z⊗Z δ/4)\n ops.rx(xi).on(q0),\n ops.rx(eta).on(q1),\n # Y(θ, φ) := exp(-i X⊗X θ/2) exp(-i Y⊗Y θ/2) exp(-i Z⊗Z φ/4)\n fsim_gate.on(q0, q1),\n ops.rz(phi / 2).on(q0),\n ops.rz(phi / 2).on(q1),\n ops.global_phase_operation(np.exp(1j * phi / 4)),\n # exp(i X1 α)\n ops.rx(-2 * alpha).on(q0),\n # Y(-θ, φ) := exp(i X⊗X θ/2) exp(i Y⊗Y θ/2) exp(-i Z⊗Z φ/4)\n ops.Z(q0),\n fsim_gate.on(q0, q1),\n ops.rz(phi / 2).on(q0),\n ops.rz(phi / 2).on(q1),\n ops.global_phase_operation(np.exp(1j * phi / 4)),\n ops.Z(q0),\n # Local X rotations to convert Γ1⊗I − iZ⊗Γ2 into exp(-i Z⊗Z δ/4)\n ops.rx(-eta).on(q1),\n ops.rx(xi).on(q0),\n # Local Z rotations to convert exp(-i Z⊗Z δ/4) into desired CPhase.\n ops.rz(-delta / 2).on(q0),\n ops.rz(-delta / 2).on(q1),\n ops.global_phase_operation(np.exp(-1j * delta / 4)),\n )\n" ]
[ [ "numpy.arctan2", "numpy.cos", "numpy.exp", "numpy.tan", "numpy.sqrt", "numpy.sin" ] ]
JamesPerlman/Dain-App
[ "f589abdca8309cfdb6dd106da7c7c4613d152c72" ]
[ "Resblock/BasicBlock.py" ]
[ "import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.init as weight_init\nimport torch\n__all__ = ['MultipleBasicBlock','MultipleBasicBlock_4']\ndef conv3x3(in_planes, out_planes, dilation = 1, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=int(dilation*(3-1)/2), dilation=dilation, bias=False)\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, dilation = 1, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes,dilation, stride)\n # self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n # self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n # weight_init.xavier_normal()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n # out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n # out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\nclass MultipleBasicBlock(nn.Module):\n\n def __init__(self,input_feature,\n block, num_blocks,\n intermediate_feature = 64, dense = True):\n super(MultipleBasicBlock, self).__init__()\n self.dense = dense\n self.num_block = num_blocks\n self.intermediate_feature = intermediate_feature\n\n self.block1= nn.Sequential(*[\n nn.Conv2d(input_feature, intermediate_feature,\n kernel_size=7, stride=1, padding=3, bias=False),\n nn.ReLU(inplace=True)\n ])\n\n # for i in range(1, num_blocks):\n self.block2 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=2 else None\n self.block3 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=3 else None\n self.block4 = block(intermediate_feature, intermediate_feature, dilation = 1) if num_blocks>=4 else None\n self.block5 = nn.Sequential(*[nn.Conv2d(intermediate_feature, 3 , (3, 3), 1, (1, 1))])\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block5(x)\n return x\n\ndef MultipleBasicBlock_4(input_feature,intermediate_feature = 64):\n model = MultipleBasicBlock(input_feature,\n BasicBlock,4 ,\n intermediate_feature)\n return model\n\n\nif __name__ == '__main__':\n\n # x= Variable(torch.randn(2,3,224,448))\n # model = S2DF(BasicBlock,3,True)\n # y = model(x)\n model = MultipleBasicBlock(200, BasicBlock,4)\n model = BasicBlock(64,64,1)\n # y = model(x)\n exit(0)" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
qualichat/qualitube
[ "6d1b9b67cc3a6cc41f5d3a6ecfd2d66b3f65c72e" ]
[ "qualitube/video.py" ]
[ "\"\"\"\nMIT License\n\nCopyright (c) 2021 Vítor Mussa\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nfrom typing import Dict, Any, Optional, List\n\nfrom pandas import DataFrame\n\n\n__all__ = ('Video',)\n\n\ndef _try_parse_int(value: Optional[str]) -> Optional[int]:\n if value is None:\n return None\n\n return int(value)\n\n\nclass Video:\n \"\"\"Represents a dataset from a video returned by the Google API.\"\"\"\n\n __slots__ = (\n 'id', 'title', 'description', 'tags', 'view_count', 'like_count',\n 'dislike_count', 'favorite_count', 'comment_count'\n )\n\n def __init__(self, payload: Dict[str, Any]) -> None:\n self.id: Optional[str] = payload.get('id', None)\n\n snippets = payload.get('snippet', {})\n self.title: Optional[str] = snippets.get('title', None)\n self.description: Optional[str] = snippets.get('description', None)\n self.tags: Optional[List[str]] = snippets.get('tags', None)\n\n # Statistics\n statistics = payload.get('statistics', {})\n\n view_count = _try_parse_int(statistics.get('viewCount'))\n self.view_count: Optional[int] = view_count\n\n like_count = _try_parse_int(statistics.get('likeCount'))\n self.like_count = like_count\n\n dislike_count = _try_parse_int(statistics.get('dislikeCount'))\n self.dislike_count = dislike_count\n\n favorite_count = _try_parse_int(statistics.get('favoriteCount'))\n self.favorite_count = favorite_count\n\n comment_count = _try_parse_int(statistics.get('commentCount'))\n self.comment_count = comment_count\n\n def __repr__(self) -> str:\n return f'<Video id={self.id!r}>'\n\n\nclass VideosResponse:\n \"\"\"Represents a response from a Google API video dataset.\"\"\"\n\n __slots__ = ('videos')\n\n def __init__(self, videos: List[Video]) -> None:\n self.videos: List[Video] = videos\n\n def __repr__(self) -> str:\n return f'<VideosResponse videos={self.videos}>'\n\n def to_dataframe(self) -> DataFrame:\n \"\"\"Transforms the video dataset into a dataframe.\"\"\"\n rows: List[List[Any]] = []\n columns = Video.__slots__\n\n for video in self.videos:\n rows.append([\n video.id, video.title, video.description, video.tags, video.view_count,\n video.like_count, video.dislike_count, video.favorite_count,\n video.comment_count\n ])\n \n return DataFrame(rows, columns=columns) # type: ignore\n" ]
[ [ "pandas.DataFrame" ] ]
MikhailKitikov/DrivingMonitor
[ "0b698d1ba644ce74e1c7d88c3e18a1ef997aabc0" ]
[ "road situation analysis/research/road/state estimation/sdc/linear_movement_model.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom .timestamp import Timestamp\nfrom .movement_model_base import MovementModelBase\n\n\nclass LinearMovementModel(MovementModelBase):\n \"\"\"Продвигает автомобиль вперед с его текущей скоростью\"\"\"\n def __init__(self, *args, **kwargs):\n super(LinearMovementModel, self).__init__(*args, **kwargs)\n\n def _move(self, dt):\n assert isinstance(dt, Timestamp)\n self._car._state = self.move_state(self._car._state, dt)\n self._car._time = self._car._time + dt\n\n def move_state(self, state, dt):\n assert isinstance(dt, Timestamp)\n car = self._car\n state_size = self._car._state_size\n assert state.shape[0] == state_size\n dt_sec = dt.to_seconds()\n x = state[car.POS_X_INDEX]\n y = state[car.POS_Y_INDEX]\n yaw = state[car.YAW_INDEX]\n vel = state[car.VEL_INDEX]\n omega = state[car.OMEGA_INDEX]\n new_state = np.zeros_like(state)\n new_state[car.POS_X_INDEX] = x + vel * np.cos(yaw) * dt_sec\n new_state[car.POS_Y_INDEX] = y + vel * np.sin(yaw) * dt_sec\n new_state[car.YAW_INDEX] = yaw + omega * dt_sec\n new_state[car.VEL_INDEX] = vel\n new_state[car.OMEGA_INDEX] = omega\n return new_state\n" ]
[ [ "numpy.sin", "numpy.zeros_like", "numpy.cos" ] ]
ErlingLie/cvat
[ "f053d14955b1e48bf6498466949f4beb1833fe8e" ]
[ "utils/auto_upload/track_on_videos.py" ]
[ "import numpy as np\nimport sys\nimport time\nimport os\nimport json\nimport argparse\n\nfrom sort.sort import Sort\n\ndef track_from_detections(detection_path, output_path, num_classes):\n '''\n Runs tracking on detections and saves result to a json file on form list(class_db)\n Where each class_db is on the form:\n - track id\n - list of bboxes on form [x0, y0, x1, y1, frame_nmbr]\n '''\n trackers = []\n #Instanciate one tracker per class\n for i in range(num_classes):\n tracker = Sort(5,1)\n trackers.append(tracker)\n\n with open(detection_path, \"r\") as json_file:\n data = json.load(json_file)\n tracks = [{} for i in range(num_classes)]\n for frame_nmbr, frame_detections in data.items():\n frame_nmbr = int(frame_nmbr)\n for class_id, class_detections in frame_detections.items():\n class_id = int(class_id)\n class_tracks = trackers[class_id].update(np.array(class_detections))\n for track in class_tracks: #Tracks are on form [x0, y0, x1, y1, id]\n if int(track[-1]) in tracks[class_id].keys():\n tracks[class_id][int(track[-1])].append(track[:4].tolist() + [frame_nmbr])\n else:\n tracks[class_id][int(track[-1])] = [track[:4].tolist() + [frame_nmbr]]\n\n with open(output_path, \"w\") as json_file:\n json.dump(tracks, json_file)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Interface for running detection on videos\")\n parser.add_argument(\"input\", type=str, help = \"Directory for input jsons\")\n parser.add_argument(\"output\", type=str, help = \"Directory for output jsons\")\n parser.add_argument(\"--num_classes\", default=1, type=int, help=\"Number of classes in dataset\")\n args = parser.parse_args()\n\n if not os.path.exists(args.output):\n os.mkdir(args.output)\n print(\"Directory\" , args.output , \"Created \")\n detections = os.listdir(args.input)\n for detection in detections:\n output_path = os.path.join(args.output, detection)\n input_path = os.path.join(args.input, detection)\n print(\"Starting tracking on file: \" + output_path)\n track_from_detections(input_path, output_path, args.num_classes)" ]
[ [ "numpy.array" ] ]
Koomook/nsmc
[ "64fb83769072be3822f663383d0855dd66c92855" ]
[ "code/partition.py" ]
[ "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np; np.random.seed(1234)\nimport pandas as pd\n\n\nntrain = 150000\n\ndata = pd.read_csv('../ratings.txt', sep='\\t', quoting=3)\ndata = pd.DataFrame(np.random.permutation(data))\ntrn, tst = data[:ntrain], data[ntrain:]\n\nheader = 'id document label'.split()\ntrn.to_csv('../ratings_train.txt', sep='\\t', index=False, header=header)\ntst.to_csv('../ratings_test.txt', sep='\\t', index=False, header=header)\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "numpy.random.permutation" ] ]